In [1]:
import glob
import math
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random
import sklearn.metrics as metrics

from tensorflow.keras import optimizers
from tensorflow.keras import backend
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import add, concatenate, Conv2D, Dense, Dropout, Flatten, Input, Lambda
from tensorflow.keras.layers import Activation, AveragePooling2D, BatchNormalization, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical


%matplotlib inline
In [2]:
                            # Set up 'ggplot' style
plt.style.use('ggplot')     # if want to use the default style, set 'classic'
plt.rcParams['ytick.right']     = True
plt.rcParams['ytick.labelright']= True
plt.rcParams['ytick.left']      = False
plt.rcParams['ytick.labelleft'] = False
plt.rcParams['font.family']     = 'Arial'
In [3]:
# where am i?
%pwd
Out[3]:
'C:\\Users\\david\\Documents\\ImageNet'
In [4]:
%ls
 Volume in drive C is Acer
 Volume Serial Number is F2E5-64E8

 Directory of C:\Users\david\Documents\ImageNet

09/16/2019  06:31 AM    <DIR>          .
09/16/2019  06:31 AM    <DIR>          ..
09/09/2019  01:02 AM                43 .gitattributes
08/22/2019  11:06 PM                26 .gitignore
09/15/2019  02:47 PM    <DIR>          .ipynb_checkpoints
09/14/2019  04:34 PM         1,216,519 Create_Train_Test_Set.ipynb
09/14/2019  03:53 PM    <DIR>          data
08/22/2019  11:09 PM           455,126 Download-ImageNet.html
09/09/2019  12:35 AM           288,923 Download-ImageNet.ipynb
09/03/2019  09:40 PM           367,769 Download-Pexels.html
09/09/2019  12:35 AM            94,549 Download-Pexels.ipynb
09/09/2019  01:02 AM        10,518,772 fgs-imgs.npz
09/08/2019  11:18 PM        41,976,052 fgs-imgs128.npz
09/08/2019  11:18 PM        23,611,636 fgs-imgs96.npz
09/14/2019  03:57 PM        49,130,740 fgsOpnImg-imgs96.npz
09/16/2019  06:28 AM            14,880 FlowerPower.csv
09/16/2019  06:07 AM        98,136,496 FlowerPower.hdf5
09/14/2019  03:06 PM       226,409,716 flr102-imgs96.npz
09/09/2019  01:02 AM        15,728,884 flr-imgs.npz
09/08/2019  11:18 PM        62,374,132 flr-imgs128.npz
09/08/2019  11:18 PM        35,085,556 flr-imgs96.npz
09/09/2019  01:02 AM        13,295,860 flrnonflr-test-imgs.npz
09/08/2019  11:18 PM        52,445,428 flrnonflr-test-imgs128.npz
09/08/2019  11:18 PM        29,500,660 flrnonflr-test-imgs96-0.8.npz
09/14/2019  04:13 PM       102,187,252 flrnonflr-test-imgs96-0.8+.npz
09/08/2019  11:18 PM        14,764,276 flrnonflr-test-imgs96-0.9.npz
09/09/2019  01:02 AM             8,900 flrnonflr-test-labels.npz
09/08/2019  11:18 PM             8,780 flrnonflr-test-labels128.npz
09/08/2019  11:18 PM             8,780 flrnonflr-test-labels96-0.8.npz
09/14/2019  07:39 PM            29,812 flrnonflr-test-labels96-0.8+.npz
09/08/2019  11:18 PM             4,516 flrnonflr-test-labels96-0.9.npz
09/09/2019  01:02 AM        53,133,556 flrnonflr-train-imgs.npz
09/08/2019  11:18 PM       209,584,372 flrnonflr-train-imgs128.npz
09/08/2019  11:18 PM       117,891,316 flrnonflr-train-imgs96-0.8.npz
09/14/2019  04:13 PM       408,748,276 flrnonflr-train-imgs96-0.8+.npz
09/08/2019  11:18 PM       132,627,700 flrnonflr-train-imgs96-0.9.npz
09/09/2019  01:02 AM            34,836 flrnonflr-train-labels.npz
09/08/2019  11:18 PM            34,356 flrnonflr-train-labels128.npz
09/08/2019  11:18 PM            34,356 flrnonflr-train-labels96-0.8.npz
09/14/2019  04:13 PM           118,516 flrnonflr-train-labels96-0.8+.npz
09/08/2019  11:18 PM            38,620 flrnonflr-train-labels96-0.9.npz
08/17/2019  11:53 AM           124,162 ImageNet-Flowers.txt
08/17/2019  03:54 PM            75,692 ImageNet-Fungus.txt
08/17/2019  03:57 PM            81,424 ImageNet-Rocks.txt
09/15/2019  09:58 PM            66,035 Inception-ResNet-v1 & v2.ipynb
09/15/2019  03:16 PM            58,343 Inception-v4.ipynb
09/14/2019  11:39 PM            26,103 model.pdf
09/14/2019  07:39 PM    <DIR>          npz
09/03/2019  09:40 PM           128,688 Pexels-Flowers.txt
09/03/2019  09:40 PM            28,575 Pexels-Umbrellas.txt
09/09/2019  01:02 AM        22,733,044 pxl_flr-imgs.npz
09/08/2019  11:18 PM        88,080,628 pxl_flr-imgs128.npz
09/08/2019  11:18 PM        49,545,460 pxl_flr-imgs96.npz
09/09/2019  01:02 AM         5,173,492 pxl_umb-imgs.npz
09/08/2019  11:18 PM        20,594,932 pxl_umb-imgs128.npz
09/08/2019  11:18 PM        11,584,756 pxl_umb-imgs96.npz
09/09/2019  01:02 AM        12,275,956 rck-imgs.npz
09/08/2019  11:18 PM        49,004,788 rck-imgs128.npz
09/08/2019  11:18 PM        27,565,300 rck-imgs96.npz
09/14/2019  04:01 PM    <DIR>          readings
08/22/2019  11:02 PM                44 README.md
09/14/2019  04:21 PM           417,457 Reshape_Resize_Images.ipynb
09/09/2019  12:48 AM         8,546,104 train_Neural_Network (Conv2D, 96-0.8).html
09/15/2019  10:09 PM         2,427,075 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data, try13).html
09/15/2019  02:35 AM        12,032,935 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try10).html
09/15/2019  11:36 AM         2,387,331 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try11).html
09/15/2019  05:42 PM         2,291,568 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try12).html
09/16/2019  06:31 AM         5,790,782 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try14).html
09/14/2019  08:36 PM         7,071,416 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp + Added data, try9).html
09/11/2019  01:01 AM         4,494,650 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try6).html
09/11/2019  10:59 PM         6,116,768 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try7).html
09/12/2019  02:35 AM         5,851,809 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try8).html
09/09/2019  03:08 AM         3,900,219 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try3).html
09/09/2019  11:09 PM         6,528,529 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try4).html
09/10/2019  08:44 PM         6,636,754 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try5).html
09/09/2019  01:32 AM         6,583,279 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try1).html
09/09/2019  02:40 AM         6,300,696 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try2).html
09/09/2019  01:23 AM         6,446,135 train_Neural_Network (ResNetV1, 96-0.8, no Dropout, try1).html
09/16/2019  06:31 AM        16,052,591 train_Neural_Network.ipynb
09/14/2019  04:08 PM        88,003,828 umbOpnImg-imgs96.npz
09/14/2019  07:39 PM         2,094,090 VGG_Model_Setup.ipynb
09/14/2019  07:39 PM            17,772 VGG_Model_Train_Test.ipynb
              76 File(s)  2,187,049,197 bytes
               6 Dir(s)  85,549,551,616 bytes free
In [5]:
flowers = glob.glob('./data/flr_*.jpg')
fungus = glob.glob('./data/fgs_*.jpg')
rocks = glob.glob('./data/rck_*.jpg')

pixel_flowers = glob.glob('./data/pxl_flower_*.jpeg')
pixel_umbrella = glob.glob('./data/pxl_umbrella_*.jpeg')
print("There are %s, %s flower, %s fungus, %s rock and %s umbrella pictures" %(len(flowers), len(pixel_flowers), len(fungus), len(rocks), len(pixel_umbrella)))
There are 1269, 1792 flower, 856 fungus, 1007 rock and 420 umbrella pictures
In [6]:
# Randomly show 10 examples of the images
from IPython.display import Image
    
dataset = flowers #flowers #fungus #rocks

for i in range(0, 5):
    index = random.randint(0, len(dataset)-1)   
    print("Showing:", dataset[index])
    
    img = mpimg.imread(dataset[index])
    imgplot = plt.imshow(img)
    plt.show()

#Image(dataset[index])
Showing: ./data\flr_01149.jpg
Showing: ./data\flr_01639.jpg
Showing: ./data\flr_00363.jpg
Showing: ./data\flr_01880.jpg
Showing: ./data\flr_01680.jpg

Extract the training and testing datasets

In [7]:
# Load the data
trDatOrg       = np.load('flrnonflr-train-imgs96-0.8+.npz')['arr_0']
trLblOrg       = np.load('flrnonflr-train-labels96-0.8+.npz')['arr_0']
tsDatOrg       = np.load('flrnonflr-test-imgs96-0.8+.npz')['arr_0']
tsLblOrg       = np.load('flrnonflr-test-labels96-0.8+.npz')['arr_0']
In [8]:
print("For the training and test datasets:")
print("The shapes are %s, %s, %s, %s" \
      %(trDatOrg.shape, trLblOrg.shape, tsDatOrg.shape, tsLblOrg.shape))
For the training and test datasets:
The shapes are (14784, 96, 96, 3), (14784,), (3696, 96, 96, 3), (3696,)
In [9]:
# Randomly show 10 examples of the images

data = tsDatOrg
label = tsLblOrg

for i in range(20):
    index = random.randint(0, len(data)-1)
    print("Showing %s index image, It is %s" %(index, label[index]))
    imgplot = plt.imshow(data[index])
    plt.show()
Showing 2132 index image, It is 1.0
Showing 2446 index image, It is 0.0
Showing 2733 index image, It is 0.0
Showing 2795 index image, It is 0.0
Showing 2361 index image, It is 0.0
Showing 1673 index image, It is 1.0
Showing 1502 index image, It is 1.0
Showing 3005 index image, It is 0.0
Showing 2014 index image, It is 1.0
Showing 2768 index image, It is 0.0
Showing 3574 index image, It is 0.0
Showing 746 index image, It is 1.0
Showing 603 index image, It is 1.0
Showing 1780 index image, It is 1.0
Showing 1891 index image, It is 1.0
Showing 1085 index image, It is 1.0
Showing 2683 index image, It is 0.0
Showing 3330 index image, It is 0.0
Showing 1202 index image, It is 1.0
Showing 357 index image, It is 1.0
In [10]:
# Convert the data into 'float32'
# Rescale the values from 0~255 to 0~1
trDat       = trDatOrg.astype('float32')/255
tsDat       = tsDatOrg.astype('float32')/255

# Retrieve the row size of each image
# Retrieve the column size of each image
imgrows     = trDat.shape[1]
imgclms     = trDat.shape[2]
channel     = 3

# # reshape the data to be [samples][width][height][channel]
# # This is required by Keras framework
# trDat       = trDat.reshape(trDat.shape[0], imgrows, imgclms, channel)
# tsDat       = tsDat.reshape(tsDat.shape[0], imgrows, imgclms, channel)

# Perform one hot encoding on the labels
# Retrieve the number of classes in this problem
trLbl       = to_categorical(trLblOrg)
tsLbl       = to_categorical(tsLblOrg)
num_classes = tsLbl.shape[1]
In [11]:
# fix random seed for reproducibility
seed = 29
np.random.seed(seed)


modelname = 'FlowerPower'

#optmz = optimizers.Adam(lr=0.001)
optmz = optimizers.RMSprop(lr=0.001)
In [12]:
# Baseline Model -> func: createBaselineModel()

def createBaselineModel():
    inputs = Input(shape=(imgrows, imgclms, channel))
    x = Conv2D(30, (4, 4), activation='relu')(inputs)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(50, (4, 4), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.3)(x)
    x = Flatten()(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=[inputs],outputs=x)
    
    model.compile(loss='categorical_crossentropy', 
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
In [13]:
# ResNetV1 -> func: createResNetV1()
def resLyr(inputs,
           numFilters=16,
           kernelSz=3,
           strides=1,
           activation='relu',
           batchNorm=True,
           convFirst=True,
           lyrName=None):
    convLyr = Conv2D(numFilters, kernel_size=kernelSz, strides=strides, 
                     padding='same', kernel_initializer='he_normal', 
                     kernel_regularizer=l2(1e-4), 
                     name=lyrName+'_conv' if lyrName else None)
    x = inputs
    if convFirst:
        x = convLyr(x)
        if batchNorm:
            x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
        if activation is not None:
            x = Activation(activation,name=lyrName+'_'+activation if lyrName else None)(x)
    else:
        if batchNorm:
            x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
        if activation is not None:
            x = Activation(activation, name=lyrName+'_'+activation if lyrName else None)(x)
        x = convLyr(x)
    return x


def resBlkV1(inputs,
             numFilters=16,
             numBlocks=3,
             downsampleOnFirst=True,
             names=None):
    x = inputs
    for run in range(0,numBlocks):
        strides = 1
        blkStr = str(run+1)
        if downsampleOnFirst and run == 0:
            strides = 2
        y = resLyr(inputs=x, numFilters=numFilters, strides=strides,
                   lyrName=names+'_Blk'+blkStr+'_Res1' if names else None)
        y = resLyr(inputs=y, numFilters=numFilters, activation=None,
                   lyrName=names+'_Blk'+blkStr+'_Res2' if names else None)
        if downsampleOnFirst and run == 0:
            x = resLyr(inputs=x, numFilters=numFilters, kernelSz=1,
                       strides=strides, activation=None, batchNorm=False,
                       lyrName=names+'_Blk'+blkStr+'_lin' if names else None)
        x = add([x,y], name=names+'_Blk'+blkStr+'_add' if names else None)
        x = Activation('relu', name=names+'_Blk'+blkStr+'_relu' if names else None)(x)
    return x

def createResNetV1(inputShape=(imgrows, imgclms, channel),
                   numClasses=2):
    inputs = Input(shape=inputShape)
    v = resLyr(inputs, lyrName='Inpt')
    v = resBlkV1(inputs=v, numFilters=16, numBlocks=3,
                 downsampleOnFirst=False, names='Stg1')
    v = Dropout(0.30)(v)
    v = resBlkV1(inputs=v, numFilters=32, numBlocks=3,
                 downsampleOnFirst=True, names='Stg2')
    v = Dropout(0.40)(v)
    v = resBlkV1(inputs=v, numFilters=64, numBlocks=3,
                 downsampleOnFirst=True, names='Stg3')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=128, numBlocks=3,
                 downsampleOnFirst=True, names='Stg4')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=128, numBlocks=3,
                 downsampleOnFirst=False, names='Stg5')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=256, numBlocks=3,
                 downsampleOnFirst=True, names='Stg6')
    v = Dropout(0.50)(v)
    v = AveragePooling2D(pool_size=6, name='AvgPool')(v)
    v = Flatten()(v) 
    outputs = Dense(numClasses, activation='softmax', 
                    kernel_initializer='he_normal')(v)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer=optmz, 
                  metrics=['accuracy'])
    return model
In [14]:
# Mostly Original # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = reduction_b_block(x)
    
    x = inception_c_block(x)
    x = inception_c_block(x)
    x = inception_c_block(x)
    
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [15]:
# Modified2 # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)
    
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [16]:
# Modified #(halfed) # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=48, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=32, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=32, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=112, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=128, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 96, 112, 128, 192
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 192, 192
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=160, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=160, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)
    
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(256)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer=optmz, 
                  metrics=['accuracy'])
    return model 
In [17]:
# Mostly Original # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs
    
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    x = inputs
    
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_ML_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    x_MR_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    x_ER_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_reduction_b_block(x)
    
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [18]:
# Modified # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs
    
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    x = inputs
    
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_ML_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    x_MR_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    x_ER_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    
    batch_norm = True
    no_a_block = 3
    no_b_block = 5
    no_c_block = 3
    
    x = stem_block(inputs)
    
    for i in range(no_a_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_a_block(x, scale=0.1)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    for i in range(no_b_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_reduction_b_block(x)
    
    for i in range(no_c_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_c_block(x, scale=0.1)

    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [19]:
# Setup the models
model       = create_inception_v4() # This is meant for training
modelGo     = create_inception_v4() # This is used for final testing

model.summary()
WARNING:tensorflow:From D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\initializers.py:104: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with distribution=normal is deprecated and will be removed in a future version.
Instructions for updating:
`normal` is a deprecated alias for `truncated_normal`
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 96, 96, 3)    0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 47, 47, 32)   896         input_1[0][0]                    
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 45, 45, 32)   9248        conv2d[0][0]                     
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 45, 45, 64)   18496       conv2d_1[0][0]                   
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 22, 22, 64)   0           conv2d_2[0][0]                   
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 22, 22, 96)   55392       conv2d_2[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 22, 22, 160)  0           max_pooling2d[0][0]              
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_6[0][0]                   
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_4[0][0]                   
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_8[0][0]                   
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 20, 20, 192)  0           conv2d_5[0][0]                   
                                                                 conv2d_9[0][0]                   
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 9, 9, 192)    331968      concatenate_1[0][0]              
__________________________________________________________________________________________________
zero_padding2d (ZeroPadding2D)  (None, 10, 10, 192)  0           conv2d_10[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 10, 10, 192)  0           concatenate_1[0][0]              
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 10, 10, 384)  0           zero_padding2d[0][0]             
                                                                 max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 10, 10, 384)  1536        concatenate_2[0][0]              
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 10, 10, 384)  0           batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_15[0][0]                  
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 10, 10, 48)   18480       average_pooling2d[0][0]          
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 10, 10, 48)   18480       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_13[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_16[0][0]                  
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 10, 10, 192)  0           conv2d_11[0][0]                  
                                                                 conv2d_12[0][0]                  
                                                                 conv2d_14[0][0]                  
                                                                 conv2d_17[0][0]                  
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 10, 10, 192)  768         concatenate_3[0][0]              
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_22[0][0]                  
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_1[0][0]        
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_20[0][0]                  
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_23[0][0]                  
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 10, 10, 192)  0           conv2d_18[0][0]                  
                                                                 conv2d_19[0][0]                  
                                                                 conv2d_21[0][0]                  
                                                                 conv2d_24[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 10, 10, 192)  768         concatenate_4[0][0]              
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_29[0][0]                  
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_2[0][0]        
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_27[0][0]                  
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_30[0][0]                  
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 10, 10, 192)  0           conv2d_25[0][0]                  
                                                                 conv2d_26[0][0]                  
                                                                 conv2d_28[0][0]                  
                                                                 conv2d_31[0][0]                  
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 10, 10, 192)  768         concatenate_5[0][0]              
__________________________________________________________________________________________________
conv2d_36 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 10, 10, 192)  0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_34 (Conv2D)              (None, 10, 10, 32)   6176        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_37 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_36[0][0]                  
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 10, 10, 48)   9264        average_pooling2d_3[0][0]        
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 10, 10, 48)   9264        batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_34[0][0]                  
__________________________________________________________________________________________________
conv2d_38 (Conv2D)              (None, 10, 10, 48)   20784       conv2d_37[0][0]                  
__________________________________________________________________________________________________
concatenate_6 (Concatenate)     (None, 10, 10, 192)  0           conv2d_32[0][0]                  
                                                                 conv2d_33[0][0]                  
                                                                 conv2d_35[0][0]                  
                                                                 conv2d_38[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 10, 10, 192)  768         concatenate_6[0][0]              
__________________________________________________________________________________________________
conv2d_40 (Conv2D)              (None, 10, 10, 192)  37056       batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_41 (Conv2D)              (None, 10, 10, 224)  387296      conv2d_40[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 4, 4, 192)    0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_39 (Conv2D)              (None, 4, 4, 384)    663936      batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_42 (Conv2D)              (None, 4, 4, 256)    516352      conv2d_41[0][0]                  
__________________________________________________________________________________________________
concatenate_7 (Concatenate)     (None, 4, 4, 832)    0           max_pooling2d_2[0][0]            
                                                                 conv2d_39[0][0]                  
                                                                 conv2d_42[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 4, 4, 832)    3328        concatenate_7[0][0]              
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 4, 4, 96)     79968       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_49 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_48[0][0]                  
__________________________________________________________________________________________________
conv2d_45 (Conv2D)              (None, 4, 4, 96)     79968       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_50 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_49[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 4, 4, 832)    0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_46 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_45[0][0]                  
__________________________________________________________________________________________________
conv2d_51 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_50[0][0]                  
__________________________________________________________________________________________________
conv2d_43 (Conv2D)              (None, 4, 4, 64)     53312       average_pooling2d_4[0][0]        
__________________________________________________________________________________________________
conv2d_44 (Conv2D)              (None, 4, 4, 192)    159936      batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_47 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_46[0][0]                  
__________________________________________________________________________________________________
conv2d_52 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_51[0][0]                  
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 4, 4, 512)    0           conv2d_43[0][0]                  
                                                                 conv2d_44[0][0]                  
                                                                 conv2d_47[0][0]                  
                                                                 conv2d_52[0][0]                  
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 4, 4, 512)    2048        concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_58 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_59 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_58[0][0]                  
__________________________________________________________________________________________________
conv2d_55 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_60 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_59[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_56 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_55[0][0]                  
__________________________________________________________________________________________________
conv2d_61 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_60[0][0]                  
__________________________________________________________________________________________________
conv2d_53 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_5[0][0]        
__________________________________________________________________________________________________
conv2d_54 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_57 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_56[0][0]                  
__________________________________________________________________________________________________
conv2d_62 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_61[0][0]                  
__________________________________________________________________________________________________
concatenate_9 (Concatenate)     (None, 4, 4, 512)    0           conv2d_53[0][0]                  
                                                                 conv2d_54[0][0]                  
                                                                 conv2d_57[0][0]                  
                                                                 conv2d_62[0][0]                  
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 4, 4, 512)    2048        concatenate_9[0][0]              
__________________________________________________________________________________________________
conv2d_68 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_69 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_68[0][0]                  
__________________________________________________________________________________________________
conv2d_65 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_70 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_69[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_66 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_65[0][0]                  
__________________________________________________________________________________________________
conv2d_71 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_70[0][0]                  
__________________________________________________________________________________________________
conv2d_63 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_6[0][0]        
__________________________________________________________________________________________________
conv2d_64 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv2d_67 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_66[0][0]                  
__________________________________________________________________________________________________
conv2d_72 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_71[0][0]                  
__________________________________________________________________________________________________
concatenate_10 (Concatenate)    (None, 4, 4, 512)    0           conv2d_63[0][0]                  
                                                                 conv2d_64[0][0]                  
                                                                 conv2d_67[0][0]                  
                                                                 conv2d_72[0][0]                  
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 4, 4, 512)    2048        concatenate_10[0][0]             
__________________________________________________________________________________________________
conv2d_78 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_79 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_78[0][0]                  
__________________________________________________________________________________________________
conv2d_75 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_80 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_79[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_76 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_75[0][0]                  
__________________________________________________________________________________________________
conv2d_81 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_80[0][0]                  
__________________________________________________________________________________________________
conv2d_73 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_7[0][0]        
__________________________________________________________________________________________________
conv2d_74 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d_77 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_76[0][0]                  
__________________________________________________________________________________________________
conv2d_82 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_81[0][0]                  
__________________________________________________________________________________________________
concatenate_11 (Concatenate)    (None, 4, 4, 512)    0           conv2d_73[0][0]                  
                                                                 conv2d_74[0][0]                  
                                                                 conv2d_77[0][0]                  
                                                                 conv2d_82[0][0]                  
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 4, 4, 512)    2048        concatenate_11[0][0]             
__________________________________________________________________________________________________
conv2d_88 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_89 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_88[0][0]                  
__________________________________________________________________________________________________
conv2d_85 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_90 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_89[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_86 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_85[0][0]                  
__________________________________________________________________________________________________
conv2d_91 (Conv2D)              (None, 4, 4, 112)    87920       conv2d_90[0][0]                  
__________________________________________________________________________________________________
conv2d_83 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_8[0][0]        
__________________________________________________________________________________________________
conv2d_84 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_9[0][0]      
__________________________________________________________________________________________________
conv2d_87 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_86[0][0]                  
__________________________________________________________________________________________________
conv2d_92 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_91[0][0]                  
__________________________________________________________________________________________________
concatenate_12 (Concatenate)    (None, 4, 4, 512)    0           conv2d_83[0][0]                  
                                                                 conv2d_84[0][0]                  
                                                                 conv2d_87[0][0]                  
                                                                 conv2d_92[0][0]                  
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 4, 4, 512)    2048        concatenate_12[0][0]             
__________________________________________________________________________________________________
conv2d_98 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_99 (Conv2D)              (None, 4, 4, 96)     64608       conv2d_98[0][0]                  
__________________________________________________________________________________________________
conv2d_95 (Conv2D)              (None, 4, 4, 96)     49248       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_100 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_99[0][0]                  
__________________________________________________________________________________________________
average_pooling2d_9 (AveragePoo (None, 4, 4, 512)    0           batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_96 (Conv2D)              (None, 4, 4, 112)    75376       conv2d_95[0][0]                  
__________________________________________________________________________________________________
conv2d_101 (Conv2D)             (None, 4, 4, 112)    87920       conv2d_100[0][0]                 
__________________________________________________________________________________________________
conv2d_93 (Conv2D)              (None, 4, 4, 64)     32832       average_pooling2d_9[0][0]        
__________________________________________________________________________________________________
conv2d_94 (Conv2D)              (None, 4, 4, 192)    98496       batch_normalization_10[0][0]     
__________________________________________________________________________________________________
conv2d_97 (Conv2D)              (None, 4, 4, 128)    100480      conv2d_96[0][0]                  
__________________________________________________________________________________________________
conv2d_102 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_101[0][0]                 
__________________________________________________________________________________________________
concatenate_13 (Concatenate)    (None, 4, 4, 512)    0           conv2d_93[0][0]                  
                                                                 conv2d_94[0][0]                  
                                                                 conv2d_97[0][0]                  
                                                                 conv2d_102[0][0]                 
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 4, 4, 512)    2048        concatenate_13[0][0]             
__________________________________________________________________________________________________
conv2d_108 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_109 (Conv2D)             (None, 4, 4, 96)     64608       conv2d_108[0][0]                 
__________________________________________________________________________________________________
conv2d_105 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_110 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_109[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_10 (AveragePo (None, 4, 4, 512)    0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_106 (Conv2D)             (None, 4, 4, 112)    75376       conv2d_105[0][0]                 
__________________________________________________________________________________________________
conv2d_111 (Conv2D)             (None, 4, 4, 112)    87920       conv2d_110[0][0]                 
__________________________________________________________________________________________________
conv2d_103 (Conv2D)             (None, 4, 4, 64)     32832       average_pooling2d_10[0][0]       
__________________________________________________________________________________________________
conv2d_104 (Conv2D)             (None, 4, 4, 192)    98496       batch_normalization_11[0][0]     
__________________________________________________________________________________________________
conv2d_107 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_106[0][0]                 
__________________________________________________________________________________________________
conv2d_112 (Conv2D)             (None, 4, 4, 128)    100480      conv2d_111[0][0]                 
__________________________________________________________________________________________________
concatenate_14 (Concatenate)    (None, 4, 4, 512)    0           conv2d_103[0][0]                 
                                                                 conv2d_104[0][0]                 
                                                                 conv2d_107[0][0]                 
                                                                 conv2d_112[0][0]                 
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 4, 4, 512)    2048        concatenate_14[0][0]             
__________________________________________________________________________________________________
conv2d_115 (Conv2D)             (None, 4, 4, 128)    65664       batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_116 (Conv2D)             (None, 4, 4, 128)    114816      conv2d_115[0][0]                 
__________________________________________________________________________________________________
conv2d_113 (Conv2D)             (None, 4, 4, 96)     49248       batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_117 (Conv2D)             (None, 4, 4, 160)    143520      conv2d_116[0][0]                 
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 1, 1, 512)    0           batch_normalization_12[0][0]     
__________________________________________________________________________________________________
conv2d_114 (Conv2D)             (None, 1, 1, 96)     83040       conv2d_113[0][0]                 
__________________________________________________________________________________________________
conv2d_118 (Conv2D)             (None, 1, 1, 160)    230560      conv2d_117[0][0]                 
__________________________________________________________________________________________________
concatenate_15 (Concatenate)    (None, 1, 1, 768)    0           max_pooling2d_3[0][0]            
                                                                 conv2d_114[0][0]                 
                                                                 conv2d_118[0][0]                 
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 1, 1, 768)    3072        concatenate_15[0][0]             
__________________________________________________________________________________________________
conv2d_124 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_125 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_124[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_11 (AveragePo (None, 1, 1, 768)    0           batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_121 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_126 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_125[0][0]                 
__________________________________________________________________________________________________
conv2d_119 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_11[0][0]       
__________________________________________________________________________________________________
conv2d_120 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_13[0][0]     
__________________________________________________________________________________________________
conv2d_122 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_121[0][0]                 
__________________________________________________________________________________________________
conv2d_123 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_121[0][0]                 
__________________________________________________________________________________________________
conv2d_127 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_126[0][0]                 
__________________________________________________________________________________________________
conv2d_128 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_126[0][0]                 
__________________________________________________________________________________________________
concatenate_16 (Concatenate)    (None, 1, 1, 768)    0           conv2d_119[0][0]                 
                                                                 conv2d_120[0][0]                 
                                                                 conv2d_122[0][0]                 
                                                                 conv2d_123[0][0]                 
                                                                 conv2d_127[0][0]                 
                                                                 conv2d_128[0][0]                 
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 1, 1, 768)    3072        concatenate_16[0][0]             
__________________________________________________________________________________________________
conv2d_134 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_135 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_134[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_12 (AveragePo (None, 1, 1, 768)    0           batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_131 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_136 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_135[0][0]                 
__________________________________________________________________________________________________
conv2d_129 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_12[0][0]       
__________________________________________________________________________________________________
conv2d_130 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_14[0][0]     
__________________________________________________________________________________________________
conv2d_132 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_131[0][0]                 
__________________________________________________________________________________________________
conv2d_133 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_131[0][0]                 
__________________________________________________________________________________________________
conv2d_137 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_136[0][0]                 
__________________________________________________________________________________________________
conv2d_138 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_136[0][0]                 
__________________________________________________________________________________________________
concatenate_17 (Concatenate)    (None, 1, 1, 768)    0           conv2d_129[0][0]                 
                                                                 conv2d_130[0][0]                 
                                                                 conv2d_132[0][0]                 
                                                                 conv2d_133[0][0]                 
                                                                 conv2d_137[0][0]                 
                                                                 conv2d_138[0][0]                 
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 1, 1, 768)    3072        concatenate_17[0][0]             
__________________________________________________________________________________________________
conv2d_144 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_145 (Conv2D)             (None, 1, 1, 224)    129248      conv2d_144[0][0]                 
__________________________________________________________________________________________________
average_pooling2d_13 (AveragePo (None, 1, 1, 768)    0           batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_141 (Conv2D)             (None, 1, 1, 192)    147648      batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_146 (Conv2D)             (None, 1, 1, 256)    172288      conv2d_145[0][0]                 
__________________________________________________________________________________________________
conv2d_139 (Conv2D)             (None, 1, 1, 128)    98432       average_pooling2d_13[0][0]       
__________________________________________________________________________________________________
conv2d_140 (Conv2D)             (None, 1, 1, 128)    98432       batch_normalization_15[0][0]     
__________________________________________________________________________________________________
conv2d_142 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_141[0][0]                 
__________________________________________________________________________________________________
conv2d_143 (Conv2D)             (None, 1, 1, 128)    73856       conv2d_141[0][0]                 
__________________________________________________________________________________________________
conv2d_147 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_146[0][0]                 
__________________________________________________________________________________________________
conv2d_148 (Conv2D)             (None, 1, 1, 128)    98432       conv2d_146[0][0]                 
__________________________________________________________________________________________________
concatenate_18 (Concatenate)    (None, 1, 1, 768)    0           conv2d_139[0][0]                 
                                                                 conv2d_140[0][0]                 
                                                                 conv2d_142[0][0]                 
                                                                 conv2d_143[0][0]                 
                                                                 conv2d_147[0][0]                 
                                                                 conv2d_148[0][0]                 
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 1, 1, 768)    3072        concatenate_18[0][0]             
__________________________________________________________________________________________________
average_pooling2d_14 (AveragePo (None, 1, 1, 768)    0           batch_normalization_16[0][0]     
__________________________________________________________________________________________________
flatten (Flatten)               (None, 768)          0           average_pooling2d_14[0][0]       
__________________________________________________________________________________________________
dense (Dense)                   (None, 256)          196864      flatten[0][0]                    
__________________________________________________________________________________________________
dropout (Dropout)               (None, 256)          0           dense[0][0]                      
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 2)            514         dropout[0][0]                    
==================================================================================================
Total params: 12,173,266
Trainable params: 12,155,986
Non-trainable params: 17,280
__________________________________________________________________________________________________
In [20]:
# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
# filepath        = modelname + ".hdf5"
# checkpoint      = ModelCheckpoint(filepath, 
#                                   monitor='val_acc', 
#                                   verbose=0, 
#                                   save_best_only=True, 
#                                   mode='max')

#                             # Log the epoch detail into csv
# csv_logger      = CSVLogger(modelname +'.csv')
# callbacks_list  = [checkpoint, csv_logger]

def lrSchedule(epoch):
    lr  = 1e-3
    
    if epoch > 270: #190
        lr  *= 0.5e-3
        
    elif epoch > 240: #160
        lr  *= 1e-3
        
    elif epoch > 200: #140
        lr  *= 1e-2
        
    elif epoch > 150: #100
        lr  *= 1e-1
        
    print('Learning rate: ', lr)
    
    return lr

LRScheduler     = LearningRateScheduler(lrSchedule)

                            # Create checkpoint for the training
                            # This checkpoint performs model saving when
                            # an epoch gives highest testing accuracy
filepath        = modelname + ".hdf5"
checkpoint      = ModelCheckpoint(filepath, 
                                  monitor='val_acc', 
                                  verbose=0, 
                                  save_best_only=True, 
                                  mode='max')

                            # Log the epoch detail into csv
csv_logger      = CSVLogger(modelname +'.csv')
callbacks_list  = [checkpoint, csv_logger, LRScheduler]
In [21]:
# Fit the model
# This is where the training starts
# model.fit(trDat, 
#           trLbl, 
#           validation_data=(tsDat, tsLbl), 
#           epochs=60, 
#           batch_size=32,
#           callbacks=callbacks_list)

datagen = ImageDataGenerator(width_shift_range=0.25,
                             height_shift_range=0.25,
                             rotation_range=45,
                             zoom_range=0.8,
                             #zca_epsilon=1e-6,
                             #zca_whitening=True,
                             fill_mode='nearest',
                             horizontal_flip=True,
                             vertical_flip=False)

model.fit_generator(datagen.flow(trDat, trLbl, batch_size=16),
                    validation_data=(tsDat, tsLbl),
                    epochs=300, #300 
                    verbose=1,
                    steps_per_epoch=len(trDat)/16,
                    callbacks=callbacks_list)
Learning rate:  0.001
Epoch 1/300
924/924 [==============================] - 191s 207ms/step - loss: 1.0292 - acc: 0.6052 - val_loss: 1.5767 - val_acc: 0.4234
Learning rate:  0.001
Epoch 2/300
924/924 [==============================] - 140s 152ms/step - loss: 0.5252 - acc: 0.7522 - val_loss: 0.8916 - val_acc: 0.7054
Learning rate:  0.001
Epoch 3/300
924/924 [==============================] - 154s 166ms/step - loss: 0.4992 - acc: 0.7708 - val_loss: 0.4134 - val_acc: 0.8206
Learning rate:  0.001
Epoch 4/300
924/924 [==============================] - 135s 146ms/step - loss: 0.4698 - acc: 0.7887 - val_loss: 0.4486 - val_acc: 0.7998
Learning rate:  0.001
Epoch 5/300
924/924 [==============================] - 134s 145ms/step - loss: 0.4566 - acc: 0.8019 - val_loss: 0.5129 - val_acc: 0.7178
Learning rate:  0.001
Epoch 6/300
924/924 [==============================] - 131s 142ms/step - loss: 0.4545 - acc: 0.7981 - val_loss: 0.3994 - val_acc: 0.8312
Learning rate:  0.001
Epoch 7/300
924/924 [==============================] - 135s 146ms/step - loss: 0.4444 - acc: 0.8068 - val_loss: 0.3917 - val_acc: 0.8304
Learning rate:  0.001
Epoch 8/300
924/924 [==============================] - 133s 144ms/step - loss: 0.4406 - acc: 0.8076 - val_loss: 0.3588 - val_acc: 0.8485
Learning rate:  0.001
Epoch 9/300
924/924 [==============================] - 133s 144ms/step - loss: 0.4321 - acc: 0.8056 - val_loss: 0.3658 - val_acc: 0.8452
Learning rate:  0.001
Epoch 10/300
924/924 [==============================] - 131s 142ms/step - loss: 0.4279 - acc: 0.8097 - val_loss: 0.3817 - val_acc: 0.8393
Learning rate:  0.001
Epoch 11/300
924/924 [==============================] - 131s 142ms/step - loss: 0.4241 - acc: 0.8157 - val_loss: 0.3682 - val_acc: 0.8534
Learning rate:  0.001
Epoch 12/300
924/924 [==============================] - 129s 140ms/step - loss: 0.4244 - acc: 0.8116 - val_loss: 0.4043 - val_acc: 0.8187
Learning rate:  0.001
Epoch 13/300
924/924 [==============================] - 132s 143ms/step - loss: 0.4189 - acc: 0.8162 - val_loss: 0.3676 - val_acc: 0.8444
Learning rate:  0.001
Epoch 14/300
924/924 [==============================] - 130s 140ms/step - loss: 0.4195 - acc: 0.8138 - val_loss: 0.3551 - val_acc: 0.8477
Learning rate:  0.001
Epoch 15/300
924/924 [==============================] - 134s 145ms/step - loss: 0.4193 - acc: 0.8128 - val_loss: 0.3565 - val_acc: 0.8490
Learning rate:  0.001
Epoch 16/300
924/924 [==============================] - 132s 143ms/step - loss: 0.4133 - acc: 0.8206 - val_loss: 0.3712 - val_acc: 0.8382
Learning rate:  0.001
Epoch 17/300
924/924 [==============================] - 130s 141ms/step - loss: 0.4149 - acc: 0.8154 - val_loss: 0.3686 - val_acc: 0.8496
Learning rate:  0.001
Epoch 18/300
924/924 [==============================] - 131s 141ms/step - loss: 0.4102 - acc: 0.8172 - val_loss: 0.3721 - val_acc: 0.8355
Learning rate:  0.001
Epoch 19/300
924/924 [==============================] - 129s 140ms/step - loss: 0.4157 - acc: 0.8170 - val_loss: 0.3488 - val_acc: 0.8588
Learning rate:  0.001
Epoch 20/300
924/924 [==============================] - 130s 140ms/step - loss: 0.4114 - acc: 0.8164 - val_loss: 0.3700 - val_acc: 0.8436
Learning rate:  0.001
Epoch 21/300
924/924 [==============================] - 134s 145ms/step - loss: 0.4058 - acc: 0.8194 - val_loss: 0.3479 - val_acc: 0.8501
Learning rate:  0.001
Epoch 22/300
924/924 [==============================] - 127s 138ms/step - loss: 0.4033 - acc: 0.8207 - val_loss: 0.3631 - val_acc: 0.8420
Learning rate:  0.001
Epoch 23/300
924/924 [==============================] - 128s 139ms/step - loss: 0.4036 - acc: 0.8226 - val_loss: 0.3415 - val_acc: 0.8563
Learning rate:  0.001
Epoch 24/300
924/924 [==============================] - 129s 140ms/step - loss: 0.3995 - acc: 0.8207 - val_loss: 0.3623 - val_acc: 0.8515
Learning rate:  0.001
Epoch 25/300
924/924 [==============================] - 130s 140ms/step - loss: 0.4011 - acc: 0.8234 - val_loss: 0.3481 - val_acc: 0.8563
Learning rate:  0.001
Epoch 26/300
924/924 [==============================] - 128s 138ms/step - loss: 0.3986 - acc: 0.8230 - val_loss: 0.3463 - val_acc: 0.8598
Learning rate:  0.001
Epoch 27/300
924/924 [==============================] - 128s 139ms/step - loss: 0.3954 - acc: 0.8246 - val_loss: 0.3847 - val_acc: 0.8363
Learning rate:  0.001
Epoch 28/300
924/924 [==============================] - 128s 139ms/step - loss: 0.3945 - acc: 0.8263 - val_loss: 0.3597 - val_acc: 0.8458
Learning rate:  0.001
Epoch 29/300
924/924 [==============================] - 128s 138ms/step - loss: 0.3945 - acc: 0.8258 - val_loss: 0.3288 - val_acc: 0.8617
Learning rate:  0.001
Epoch 30/300
924/924 [==============================] - 129s 140ms/step - loss: 0.3903 - acc: 0.8288 - val_loss: 0.3519 - val_acc: 0.8525
Learning rate:  0.001
Epoch 31/300
924/924 [==============================] - 131s 142ms/step - loss: 0.3893 - acc: 0.8292 - val_loss: 0.3174 - val_acc: 0.8677
Learning rate:  0.001
Epoch 32/300
924/924 [==============================] - 128s 138ms/step - loss: 0.3789 - acc: 0.8352 - val_loss: 0.3274 - val_acc: 0.8720
Learning rate:  0.001
Epoch 33/300
924/924 [==============================] - 127s 137ms/step - loss: 0.3808 - acc: 0.8381 - val_loss: 0.3216 - val_acc: 0.8617
Learning rate:  0.001
Epoch 34/300
924/924 [==============================] - 128s 139ms/step - loss: 0.3796 - acc: 0.8351 - val_loss: 0.3219 - val_acc: 0.8715
Learning rate:  0.001
Epoch 35/300
924/924 [==============================] - 129s 140ms/step - loss: 0.3738 - acc: 0.8375 - val_loss: 0.3796 - val_acc: 0.8450
Learning rate:  0.001
Epoch 36/300
924/924 [==============================] - 132s 143ms/step - loss: 0.3702 - acc: 0.8421 - val_loss: 0.3300 - val_acc: 0.8644
Learning rate:  0.001
Epoch 37/300
924/924 [==============================] - 131s 142ms/step - loss: 0.3650 - acc: 0.8431 - val_loss: 0.3321 - val_acc: 0.8839
Learning rate:  0.001
Epoch 38/300
924/924 [==============================] - 133s 144ms/step - loss: 0.3689 - acc: 0.8433 - val_loss: 0.2953 - val_acc: 0.8812
Learning rate:  0.001
Epoch 39/300
924/924 [==============================] - 132s 143ms/step - loss: 0.3648 - acc: 0.8428 - val_loss: 0.3336 - val_acc: 0.8626
Learning rate:  0.001
Epoch 40/300
924/924 [==============================] - 129s 140ms/step - loss: 0.3582 - acc: 0.8475 - val_loss: 0.3122 - val_acc: 0.8796
Learning rate:  0.001
Epoch 41/300
924/924 [==============================] - 129s 140ms/step - loss: 0.3596 - acc: 0.8489 - val_loss: 0.3224 - val_acc: 0.8658
Learning rate:  0.001
Epoch 42/300
924/924 [==============================] - 129s 140ms/step - loss: 0.3539 - acc: 0.8506 - val_loss: 0.3036 - val_acc: 0.8864
Learning rate:  0.001
Epoch 43/300
924/924 [==============================] - 128s 138ms/step - loss: 0.3485 - acc: 0.8547 - val_loss: 0.3685 - val_acc: 0.8433
Learning rate:  0.001
Epoch 44/300
924/924 [==============================] - 130s 141ms/step - loss: 0.3543 - acc: 0.8478 - val_loss: 0.3129 - val_acc: 0.8755
Learning rate:  0.001
Epoch 45/300
924/924 [==============================] - 125s 135ms/step - loss: 0.3579 - acc: 0.8506 - val_loss: 0.2779 - val_acc: 0.8902
Learning rate:  0.001
Epoch 46/300
924/924 [==============================] - 124s 134ms/step - loss: 0.3483 - acc: 0.8542 - val_loss: 0.2997 - val_acc: 0.8864
Learning rate:  0.001
Epoch 47/300
924/924 [==============================] - 118s 127ms/step - loss: 0.3531 - acc: 0.8511 - val_loss: 0.2883 - val_acc: 0.8869
Learning rate:  0.001
Epoch 48/300
924/924 [==============================] - 119s 129ms/step - loss: 0.3499 - acc: 0.8499 - val_loss: 0.3212 - val_acc: 0.8672
Learning rate:  0.001
Epoch 49/300
924/924 [==============================] - 121s 131ms/step - loss: 0.3433 - acc: 0.8542 - val_loss: 0.3329 - val_acc: 0.8596
Learning rate:  0.001
Epoch 50/300
924/924 [==============================] - 125s 135ms/step - loss: 0.3437 - acc: 0.8513 - val_loss: 0.3124 - val_acc: 0.8728
Learning rate:  0.001
Epoch 51/300
924/924 [==============================] - 133s 144ms/step - loss: 0.3386 - acc: 0.8573 - val_loss: 0.2966 - val_acc: 0.8742
Learning rate:  0.001
Epoch 52/300
924/924 [==============================] - 134s 145ms/step - loss: 0.3437 - acc: 0.8529 - val_loss: 0.2894 - val_acc: 0.8904
Learning rate:  0.001
Epoch 53/300
924/924 [==============================] - 128s 139ms/step - loss: 0.3447 - acc: 0.8563 - val_loss: 0.2670 - val_acc: 0.8958
Learning rate:  0.001
Epoch 54/300
924/924 [==============================] - 133s 144ms/step - loss: 0.3400 - acc: 0.8569 - val_loss: 0.2847 - val_acc: 0.8861
Learning rate:  0.001
Epoch 55/300
924/924 [==============================] - 133s 143ms/step - loss: 0.3400 - acc: 0.8563 - val_loss: 0.2787 - val_acc: 0.8885
Learning rate:  0.001
Epoch 56/300
924/924 [==============================] - 135s 146ms/step - loss: 0.3377 - acc: 0.8567 - val_loss: 0.2872 - val_acc: 0.8904
Learning rate:  0.001
Epoch 57/300
924/924 [==============================] - 153s 166ms/step - loss: 0.3312 - acc: 0.8605 - val_loss: 0.2713 - val_acc: 0.8988
Learning rate:  0.001
Epoch 58/300
924/924 [==============================] - 139s 151ms/step - loss: 0.3324 - acc: 0.8619 - val_loss: 0.2898 - val_acc: 0.8804
Learning rate:  0.001
Epoch 59/300
924/924 [==============================] - 139s 151ms/step - loss: 0.3360 - acc: 0.8573 - val_loss: 0.2772 - val_acc: 0.8896
Learning rate:  0.001
Epoch 60/300
924/924 [==============================] - 137s 148ms/step - loss: 0.3326 - acc: 0.8583 - val_loss: 0.2700 - val_acc: 0.8934
Learning rate:  0.001
Epoch 61/300
924/924 [==============================] - 143s 155ms/step - loss: 0.3357 - acc: 0.8588 - val_loss: 0.2776 - val_acc: 0.8899
Learning rate:  0.001
Epoch 62/300
924/924 [==============================] - 143s 154ms/step - loss: 0.3317 - acc: 0.8594 - val_loss: 0.2591 - val_acc: 0.9007
Learning rate:  0.001
Epoch 63/300
924/924 [==============================] - 141s 153ms/step - loss: 0.3273 - acc: 0.8613 - val_loss: 0.2732 - val_acc: 0.8918
Learning rate:  0.001
Epoch 64/300
924/924 [==============================] - 142s 153ms/step - loss: 0.3297 - acc: 0.8613 - val_loss: 0.2782 - val_acc: 0.8931
Learning rate:  0.001
Epoch 65/300
924/924 [==============================] - 145s 157ms/step - loss: 0.3283 - acc: 0.8638 - val_loss: 0.2763 - val_acc: 0.8926
Learning rate:  0.001
Epoch 66/300
924/924 [==============================] - 155s 168ms/step - loss: 0.3261 - acc: 0.8646 - val_loss: 0.2793 - val_acc: 0.8969
Learning rate:  0.001
Epoch 67/300
924/924 [==============================] - 136s 147ms/step - loss: 0.3242 - acc: 0.8614 - val_loss: 0.2589 - val_acc: 0.8988
Learning rate:  0.001
Epoch 68/300
924/924 [==============================] - 132s 143ms/step - loss: 0.3308 - acc: 0.8601 - val_loss: 0.2624 - val_acc: 0.9029
Learning rate:  0.001
Epoch 69/300
924/924 [==============================] - 147s 159ms/step - loss: 0.3289 - acc: 0.8631 - val_loss: 0.2778 - val_acc: 0.8912
Learning rate:  0.001
Epoch 70/300
924/924 [==============================] - 137s 148ms/step - loss: 0.3258 - acc: 0.8615 - val_loss: 0.3162 - val_acc: 0.8850
Learning rate:  0.001
Epoch 71/300
924/924 [==============================] - 140s 151ms/step - loss: 0.3201 - acc: 0.8673 - val_loss: 0.2767 - val_acc: 0.8910
Learning rate:  0.001
Epoch 72/300
924/924 [==============================] - 142s 154ms/step - loss: 0.3207 - acc: 0.8631 - val_loss: 0.2551 - val_acc: 0.8985
Learning rate:  0.001
Epoch 73/300
924/924 [==============================] - 138s 149ms/step - loss: 0.3206 - acc: 0.8667 - val_loss: 0.2682 - val_acc: 0.8939
Learning rate:  0.001
Epoch 74/300
924/924 [==============================] - 143s 155ms/step - loss: 0.3172 - acc: 0.8677 - val_loss: 0.3368 - val_acc: 0.8523
Learning rate:  0.001
Epoch 75/300
924/924 [==============================] - 139s 150ms/step - loss: 0.3235 - acc: 0.8646 - val_loss: 0.2632 - val_acc: 0.8994
Learning rate:  0.001
Epoch 76/300
924/924 [==============================] - 125s 135ms/step - loss: 0.3161 - acc: 0.8713 - val_loss: 0.2776 - val_acc: 0.8929
Learning rate:  0.001
Epoch 77/300
924/924 [==============================] - 131s 142ms/step - loss: 0.3189 - acc: 0.8660 - val_loss: 0.2777 - val_acc: 0.8899
Learning rate:  0.001
Epoch 78/300
924/924 [==============================] - 138s 150ms/step - loss: 0.3200 - acc: 0.8698 - val_loss: 0.2516 - val_acc: 0.9037
Learning rate:  0.001
Epoch 79/300
924/924 [==============================] - 140s 152ms/step - loss: 0.3145 - acc: 0.8695 - val_loss: 0.2525 - val_acc: 0.9018
Learning rate:  0.001
Epoch 80/300
924/924 [==============================] - 136s 147ms/step - loss: 0.3121 - acc: 0.8725 - val_loss: 0.2621 - val_acc: 0.8996
Learning rate:  0.001
Epoch 81/300
924/924 [==============================] - 132s 143ms/step - loss: 0.3170 - acc: 0.8701 - val_loss: 0.2655 - val_acc: 0.8950
Learning rate:  0.001
Epoch 82/300
924/924 [==============================] - 134s 145ms/step - loss: 0.3100 - acc: 0.8697 - val_loss: 0.2734 - val_acc: 0.9010
Learning rate:  0.001
Epoch 83/300
924/924 [==============================] - 126s 137ms/step - loss: 0.3124 - acc: 0.8698 - val_loss: 0.2774 - val_acc: 0.8896
Learning rate:  0.001
Epoch 84/300
924/924 [==============================] - 133s 144ms/step - loss: 0.3192 - acc: 0.8686 - val_loss: 0.2579 - val_acc: 0.9018
Learning rate:  0.001
Epoch 85/300
924/924 [==============================] - 132s 142ms/step - loss: 0.3127 - acc: 0.8716 - val_loss: 0.2879 - val_acc: 0.8872
Learning rate:  0.001
Epoch 86/300
924/924 [==============================] - 137s 148ms/step - loss: 0.3107 - acc: 0.8711 - val_loss: 0.2574 - val_acc: 0.9026
Learning rate:  0.001
Epoch 87/300
924/924 [==============================] - 131s 142ms/step - loss: 0.3114 - acc: 0.8714 - val_loss: 0.2734 - val_acc: 0.9023
Learning rate:  0.001
Epoch 88/300
924/924 [==============================] - 125s 136ms/step - loss: 0.3078 - acc: 0.8715 - val_loss: 0.2471 - val_acc: 0.9061
Learning rate:  0.001
Epoch 89/300
924/924 [==============================] - 131s 142ms/step - loss: 0.3049 - acc: 0.8753 - val_loss: 0.2409 - val_acc: 0.9110
Learning rate:  0.001
Epoch 90/300
924/924 [==============================] - 140s 151ms/step - loss: 0.3115 - acc: 0.8725 - val_loss: 0.2393 - val_acc: 0.9069
Learning rate:  0.001
Epoch 91/300
924/924 [==============================] - 131s 142ms/step - loss: 0.3113 - acc: 0.8726 - val_loss: 0.2485 - val_acc: 0.9034
Learning rate:  0.001
Epoch 92/300
924/924 [==============================] - 124s 134ms/step - loss: 0.3048 - acc: 0.8761 - val_loss: 0.2810 - val_acc: 0.8915
Learning rate:  0.001
Epoch 93/300
924/924 [==============================] - 143s 155ms/step - loss: 0.3014 - acc: 0.8756 - val_loss: 0.2500 - val_acc: 0.9031
Learning rate:  0.001
Epoch 94/300
924/924 [==============================] - 142s 154ms/step - loss: 0.3099 - acc: 0.8727 - val_loss: 0.2916 - val_acc: 0.8782
Learning rate:  0.001
Epoch 95/300
924/924 [==============================] - 134s 145ms/step - loss: 0.3038 - acc: 0.8745 - val_loss: 0.2459 - val_acc: 0.9037
Learning rate:  0.001
Epoch 96/300
924/924 [==============================] - 139s 150ms/step - loss: 0.3050 - acc: 0.8768 - val_loss: 0.2499 - val_acc: 0.9012
Learning rate:  0.001
Epoch 97/300
924/924 [==============================] - 138s 149ms/step - loss: 0.3084 - acc: 0.8731 - val_loss: 0.2370 - val_acc: 0.9085
Learning rate:  0.001
Epoch 98/300
924/924 [==============================] - 131s 142ms/step - loss: 0.3090 - acc: 0.8711 - val_loss: 0.2482 - val_acc: 0.9056
Learning rate:  0.001
Epoch 99/300
924/924 [==============================] - 140s 152ms/step - loss: 0.3077 - acc: 0.8739 - val_loss: 0.2670 - val_acc: 0.8910
Learning rate:  0.001
Epoch 100/300
924/924 [==============================] - 138s 150ms/step - loss: 0.3089 - acc: 0.8747 - val_loss: 0.2534 - val_acc: 0.9042
Learning rate:  0.001
Epoch 101/300
924/924 [==============================] - 140s 151ms/step - loss: 0.2973 - acc: 0.8774 - val_loss: 0.2381 - val_acc: 0.9056
Learning rate:  0.001
Epoch 102/300
924/924 [==============================] - 136s 147ms/step - loss: 0.3035 - acc: 0.8766 - val_loss: 0.2423 - val_acc: 0.9153
Learning rate:  0.001
Epoch 103/300
924/924 [==============================] - 140s 152ms/step - loss: 0.3048 - acc: 0.8713 - val_loss: 0.2508 - val_acc: 0.9061
Learning rate:  0.001
Epoch 104/300
924/924 [==============================] - 137s 148ms/step - loss: 0.3031 - acc: 0.8778 - val_loss: 0.2312 - val_acc: 0.9102
Learning rate:  0.001
Epoch 105/300
924/924 [==============================] - 145s 157ms/step - loss: 0.3007 - acc: 0.8768 - val_loss: 0.2539 - val_acc: 0.9007
Learning rate:  0.001
Epoch 106/300
924/924 [==============================] - 133s 144ms/step - loss: 0.3001 - acc: 0.8747 - val_loss: 0.2575 - val_acc: 0.8975
Learning rate:  0.001
Epoch 107/300
924/924 [==============================] - 136s 147ms/step - loss: 0.3028 - acc: 0.8767 - val_loss: 0.2730 - val_acc: 0.8937
Learning rate:  0.001
Epoch 108/300
924/924 [==============================] - 134s 145ms/step - loss: 0.2982 - acc: 0.8794 - val_loss: 0.2340 - val_acc: 0.9199
Learning rate:  0.001
Epoch 109/300
924/924 [==============================] - 131s 142ms/step - loss: 0.2937 - acc: 0.8823 - val_loss: 0.3019 - val_acc: 0.8726
Learning rate:  0.001
Epoch 110/300
924/924 [==============================] - 128s 139ms/step - loss: 0.2920 - acc: 0.8788 - val_loss: 0.2517 - val_acc: 0.8985
Learning rate:  0.001
Epoch 111/300
924/924 [==============================] - 127s 138ms/step - loss: 0.2955 - acc: 0.8818 - val_loss: 0.2463 - val_acc: 0.9015
Learning rate:  0.001
Epoch 112/300
924/924 [==============================] - 129s 140ms/step - loss: 0.2960 - acc: 0.8793 - val_loss: 0.2474 - val_acc: 0.9150
Learning rate:  0.001
Epoch 113/300
924/924 [==============================] - 132s 142ms/step - loss: 0.2971 - acc: 0.8813 - val_loss: 0.2593 - val_acc: 0.9058
Learning rate:  0.001
Epoch 114/300
924/924 [==============================] - 130s 141ms/step - loss: 0.2922 - acc: 0.8806 - val_loss: 0.2441 - val_acc: 0.9104
Learning rate:  0.001
Epoch 115/300
924/924 [==============================] - 131s 142ms/step - loss: 0.2886 - acc: 0.8833 - val_loss: 0.2648 - val_acc: 0.8964
Learning rate:  0.001
Epoch 116/300
924/924 [==============================] - 126s 137ms/step - loss: 0.2907 - acc: 0.8810 - val_loss: 0.2282 - val_acc: 0.9131
Learning rate:  0.001
Epoch 117/300
924/924 [==============================] - 129s 140ms/step - loss: 0.2927 - acc: 0.8810 - val_loss: 0.2834 - val_acc: 0.8872
Learning rate:  0.001
Epoch 118/300
924/924 [==============================] - 128s 139ms/step - loss: 0.2945 - acc: 0.8808 - val_loss: 0.2540 - val_acc: 0.9023
Learning rate:  0.001
Epoch 119/300
924/924 [==============================] - 127s 138ms/step - loss: 0.2937 - acc: 0.8801 - val_loss: 0.2344 - val_acc: 0.9172
Learning rate:  0.001
Epoch 120/300
924/924 [==============================] - 130s 140ms/step - loss: 0.2933 - acc: 0.8797 - val_loss: 0.2635 - val_acc: 0.9012
Learning rate:  0.001
Epoch 121/300
924/924 [==============================] - 133s 144ms/step - loss: 0.2871 - acc: 0.8839 - val_loss: 0.3530 - val_acc: 0.8412
Learning rate:  0.001
Epoch 122/300
924/924 [==============================] - 131s 142ms/step - loss: 0.2876 - acc: 0.8822 - val_loss: 0.2250 - val_acc: 0.9150
Learning rate:  0.001
Epoch 123/300
924/924 [==============================] - 130s 141ms/step - loss: 0.2951 - acc: 0.8807 - val_loss: 0.2286 - val_acc: 0.9088
Learning rate:  0.001
Epoch 124/300
924/924 [==============================] - 132s 142ms/step - loss: 0.2906 - acc: 0.8830 - val_loss: 0.2360 - val_acc: 0.9150
Learning rate:  0.001
Epoch 125/300
924/924 [==============================] - 128s 139ms/step - loss: 0.2939 - acc: 0.8821 - val_loss: 0.2202 - val_acc: 0.9186
Learning rate:  0.001
Epoch 126/300
924/924 [==============================] - 128s 139ms/step - loss: 0.2852 - acc: 0.8870 - val_loss: 0.2612 - val_acc: 0.8964
Learning rate:  0.001
Epoch 127/300
924/924 [==============================] - 128s 139ms/step - loss: 0.2889 - acc: 0.8841 - val_loss: 0.2319 - val_acc: 0.9096
Learning rate:  0.001
Epoch 128/300
924/924 [==============================] - 129s 139ms/step - loss: 0.2920 - acc: 0.8816 - val_loss: 0.2292 - val_acc: 0.9104
Learning rate:  0.001
Epoch 129/300
924/924 [==============================] - 132s 143ms/step - loss: 0.2912 - acc: 0.8793 - val_loss: 0.2198 - val_acc: 0.9207
Learning rate:  0.001
Epoch 130/300
924/924 [==============================] - 132s 142ms/step - loss: 0.2886 - acc: 0.8848 - val_loss: 0.2466 - val_acc: 0.9186
Learning rate:  0.001
Epoch 131/300
924/924 [==============================] - 129s 140ms/step - loss: 0.2851 - acc: 0.8851 - val_loss: 0.2504 - val_acc: 0.9026
Learning rate:  0.001
Epoch 132/300
924/924 [==============================] - 132s 143ms/step - loss: 0.2883 - acc: 0.8853 - val_loss: 0.2254 - val_acc: 0.9191
Learning rate:  0.001
Epoch 133/300
924/924 [==============================] - 130s 140ms/step - loss: 0.2879 - acc: 0.8858 - val_loss: 0.2695 - val_acc: 0.8902
Learning rate:  0.001
Epoch 134/300
924/924 [==============================] - 128s 138ms/step - loss: 0.2862 - acc: 0.8868 - val_loss: 0.2295 - val_acc: 0.9107
Learning rate:  0.001
Epoch 135/300
924/924 [==============================] - 136s 147ms/step - loss: 0.2889 - acc: 0.8839 - val_loss: 0.2480 - val_acc: 0.8999
Learning rate:  0.001
Epoch 136/300
924/924 [==============================] - 145s 157ms/step - loss: 0.2786 - acc: 0.8896 - val_loss: 0.2310 - val_acc: 0.9099
Learning rate:  0.001
Epoch 137/300
924/924 [==============================] - 134s 145ms/step - loss: 0.2818 - acc: 0.8851 - val_loss: 0.2366 - val_acc: 0.9061
Learning rate:  0.001
Epoch 138/300
924/924 [==============================] - 133s 144ms/step - loss: 0.2864 - acc: 0.8858 - val_loss: 0.2350 - val_acc: 0.9186
Learning rate:  0.001
Epoch 139/300
924/924 [==============================] - 135s 146ms/step - loss: 0.2843 - acc: 0.8876 - val_loss: 0.2488 - val_acc: 0.9040
Learning rate:  0.001
Epoch 140/300
924/924 [==============================] - 131s 142ms/step - loss: 0.2795 - acc: 0.8876 - val_loss: 0.2213 - val_acc: 0.9177
Learning rate:  0.001
Epoch 141/300
924/924 [==============================] - 131s 142ms/step - loss: 0.2767 - acc: 0.8898 - val_loss: 0.2478 - val_acc: 0.9085
Learning rate:  0.001
Epoch 142/300
924/924 [==============================] - 136s 148ms/step - loss: 0.2801 - acc: 0.8885 - val_loss: 0.2238 - val_acc: 0.9148
Learning rate:  0.001
Epoch 143/300
924/924 [==============================] - 130s 141ms/step - loss: 0.2765 - acc: 0.8897 - val_loss: 0.2278 - val_acc: 0.9169
Learning rate:  0.001
Epoch 144/300
924/924 [==============================] - 137s 149ms/step - loss: 0.2833 - acc: 0.8847 - val_loss: 0.2283 - val_acc: 0.9248
Learning rate:  0.001
Epoch 145/300
924/924 [==============================] - 135s 146ms/step - loss: 0.2760 - acc: 0.8920 - val_loss: 0.2198 - val_acc: 0.9226
Learning rate:  0.001
Epoch 146/300
924/924 [==============================] - 129s 140ms/step - loss: 0.2806 - acc: 0.8882 - val_loss: 0.2128 - val_acc: 0.9196
Learning rate:  0.001
Epoch 147/300
924/924 [==============================] - 131s 142ms/step - loss: 0.2766 - acc: 0.8917 - val_loss: 0.2189 - val_acc: 0.9194
Learning rate:  0.001
Epoch 148/300
924/924 [==============================] - 133s 144ms/step - loss: 0.2810 - acc: 0.8859 - val_loss: 0.2260 - val_acc: 0.9148
Learning rate:  0.001
Epoch 149/300
924/924 [==============================] - 133s 144ms/step - loss: 0.2811 - acc: 0.8866 - val_loss: 0.2329 - val_acc: 0.9188
Learning rate:  0.001
Epoch 150/300
924/924 [==============================] - 143s 154ms/step - loss: 0.2756 - acc: 0.8908 - val_loss: 0.2394 - val_acc: 0.9150
Learning rate:  0.001
Epoch 151/300
924/924 [==============================] - 132s 143ms/step - loss: 0.2785 - acc: 0.8884 - val_loss: 0.2257 - val_acc: 0.9177
Learning rate:  0.0001
Epoch 152/300
924/924 [==============================] - 133s 144ms/step - loss: 0.2662 - acc: 0.8949 - val_loss: 0.2088 - val_acc: 0.9223
Learning rate:  0.0001
Epoch 153/300
924/924 [==============================] - 137s 148ms/step - loss: 0.2630 - acc: 0.8948 - val_loss: 0.2053 - val_acc: 0.9240
Learning rate:  0.0001
Epoch 154/300
924/924 [==============================] - 138s 150ms/step - loss: 0.2582 - acc: 0.8998 - val_loss: 0.2092 - val_acc: 0.9223
Learning rate:  0.0001
Epoch 155/300
924/924 [==============================] - 135s 146ms/step - loss: 0.2553 - acc: 0.9008 - val_loss: 0.2063 - val_acc: 0.9232
Learning rate:  0.0001
Epoch 156/300
924/924 [==============================] - 137s 149ms/step - loss: 0.2551 - acc: 0.9000 - val_loss: 0.2040 - val_acc: 0.9259
Learning rate:  0.0001
Epoch 157/300
924/924 [==============================] - 130s 141ms/step - loss: 0.2548 - acc: 0.9014 - val_loss: 0.2008 - val_acc: 0.9251
Learning rate:  0.0001
Epoch 158/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2511 - acc: 0.9014 - val_loss: 0.1986 - val_acc: 0.9253
Learning rate:  0.0001
Epoch 159/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2558 - acc: 0.8986 - val_loss: 0.2038 - val_acc: 0.9264
Learning rate:  0.0001
Epoch 160/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2531 - acc: 0.9013 - val_loss: 0.2041 - val_acc: 0.9253
Learning rate:  0.0001
Epoch 161/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2551 - acc: 0.8980 - val_loss: 0.2075 - val_acc: 0.9226
Learning rate:  0.0001
Epoch 162/300
924/924 [==============================] - 124s 135ms/step - loss: 0.2471 - acc: 0.9025 - val_loss: 0.2054 - val_acc: 0.9267
Learning rate:  0.0001
Epoch 163/300
924/924 [==============================] - 126s 136ms/step - loss: 0.2507 - acc: 0.9013 - val_loss: 0.2053 - val_acc: 0.9253
Learning rate:  0.0001
Epoch 164/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2529 - acc: 0.9010 - val_loss: 0.2102 - val_acc: 0.9215
Learning rate:  0.0001
Epoch 165/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2524 - acc: 0.9001 - val_loss: 0.2035 - val_acc: 0.9248
Learning rate:  0.0001
Epoch 166/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2485 - acc: 0.9026 - val_loss: 0.2033 - val_acc: 0.9256
Learning rate:  0.0001
Epoch 167/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2518 - acc: 0.9012 - val_loss: 0.1990 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 168/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2530 - acc: 0.9008 - val_loss: 0.2048 - val_acc: 0.9278
Learning rate:  0.0001
Epoch 169/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2559 - acc: 0.9004 - val_loss: 0.2025 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 170/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2519 - acc: 0.9026 - val_loss: 0.2075 - val_acc: 0.9237
Learning rate:  0.0001
Epoch 171/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2468 - acc: 0.9040 - val_loss: 0.2020 - val_acc: 0.9267
Learning rate:  0.0001
Epoch 172/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2470 - acc: 0.9051 - val_loss: 0.2060 - val_acc: 0.9251
Learning rate:  0.0001
Epoch 173/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2471 - acc: 0.9025 - val_loss: 0.1992 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 174/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2499 - acc: 0.9030 - val_loss: 0.2062 - val_acc: 0.9242
Learning rate:  0.0001
Epoch 175/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2505 - acc: 0.9032 - val_loss: 0.2027 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 176/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2531 - acc: 0.9019 - val_loss: 0.2060 - val_acc: 0.9269
Learning rate:  0.0001
Epoch 177/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2470 - acc: 0.9031 - val_loss: 0.1978 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 178/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2520 - acc: 0.9010 - val_loss: 0.2022 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 179/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2513 - acc: 0.9023 - val_loss: 0.1943 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 180/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2493 - acc: 0.9005 - val_loss: 0.1996 - val_acc: 0.9280
Learning rate:  0.0001
Epoch 181/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2456 - acc: 0.9025 - val_loss: 0.1982 - val_acc: 0.9297
Learning rate:  0.0001
Epoch 182/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2471 - acc: 0.9020 - val_loss: 0.1999 - val_acc: 0.9307
Learning rate:  0.0001
Epoch 183/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2494 - acc: 0.9016 - val_loss: 0.2031 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 184/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2441 - acc: 0.9037 - val_loss: 0.1984 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 185/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2470 - acc: 0.9029 - val_loss: 0.2025 - val_acc: 0.9259
Learning rate:  0.0001
Epoch 186/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2469 - acc: 0.9017 - val_loss: 0.2038 - val_acc: 0.9264
Learning rate:  0.0001
Epoch 187/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2501 - acc: 0.9001 - val_loss: 0.1981 - val_acc: 0.9275
Learning rate:  0.0001
Epoch 188/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2490 - acc: 0.9040 - val_loss: 0.1987 - val_acc: 0.9269
Learning rate:  0.0001
Epoch 189/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2440 - acc: 0.9062 - val_loss: 0.1981 - val_acc: 0.9267
Learning rate:  0.0001
Epoch 190/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2490 - acc: 0.9021 - val_loss: 0.2066 - val_acc: 0.9269
Learning rate:  0.0001
Epoch 191/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2518 - acc: 0.9004 - val_loss: 0.1986 - val_acc: 0.9297
Learning rate:  0.0001
Epoch 192/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2496 - acc: 0.9042 - val_loss: 0.1950 - val_acc: 0.9286
Learning rate:  0.0001
Epoch 193/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2465 - acc: 0.9040 - val_loss: 0.1973 - val_acc: 0.9264
Learning rate:  0.0001
Epoch 194/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2466 - acc: 0.9017 - val_loss: 0.2105 - val_acc: 0.9248
Learning rate:  0.0001
Epoch 195/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2463 - acc: 0.9027 - val_loss: 0.1976 - val_acc: 0.9237
Learning rate:  0.0001
Epoch 196/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2525 - acc: 0.9007 - val_loss: 0.1977 - val_acc: 0.9267
Learning rate:  0.0001
Epoch 197/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2435 - acc: 0.9071 - val_loss: 0.1964 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 198/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2520 - acc: 0.9002 - val_loss: 0.1998 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 199/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2413 - acc: 0.9041 - val_loss: 0.1943 - val_acc: 0.9278
Learning rate:  0.0001
Epoch 200/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2423 - acc: 0.9076 - val_loss: 0.1992 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 201/300
924/924 [==============================] - 124s 134ms/step - loss: 0.2467 - acc: 0.9027 - val_loss: 0.2020 - val_acc: 0.9264
Learning rate:  1e-05
Epoch 202/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2558 - acc: 0.9004 - val_loss: 0.1988 - val_acc: 0.9278
Learning rate:  1e-05
Epoch 203/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2414 - acc: 0.9079 - val_loss: 0.1969 - val_acc: 0.9269
Learning rate:  1e-05
Epoch 204/300
924/924 [==============================] - 123s 134ms/step - loss: 0.2500 - acc: 0.9033 - val_loss: 0.1974 - val_acc: 0.9280
Learning rate:  1e-05
Epoch 205/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2389 - acc: 0.9085 - val_loss: 0.1989 - val_acc: 0.9278
Learning rate:  1e-05
Epoch 206/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2468 - acc: 0.9037 - val_loss: 0.1967 - val_acc: 0.9280
Learning rate:  1e-05
Epoch 207/300
924/924 [==============================] - 123s 134ms/step - loss: 0.2454 - acc: 0.9042 - val_loss: 0.1982 - val_acc: 0.9269
Learning rate:  1e-05
Epoch 208/300
924/924 [==============================] - 121s 130ms/step - loss: 0.2429 - acc: 0.9069 - val_loss: 0.1977 - val_acc: 0.9269
Learning rate:  1e-05
Epoch 209/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2494 - acc: 0.9052 - val_loss: 0.1994 - val_acc: 0.9288
Learning rate:  1e-05
Epoch 210/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2471 - acc: 0.9036 - val_loss: 0.1970 - val_acc: 0.9294
Learning rate:  1e-05
Epoch 211/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2423 - acc: 0.9080 - val_loss: 0.1954 - val_acc: 0.9280
Learning rate:  1e-05
Epoch 212/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2445 - acc: 0.9054 - val_loss: 0.1965 - val_acc: 0.9280
Learning rate:  1e-05
Epoch 213/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2466 - acc: 0.9023 - val_loss: 0.1953 - val_acc: 0.9294
Learning rate:  1e-05
Epoch 214/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2391 - acc: 0.9064 - val_loss: 0.1952 - val_acc: 0.9283
Learning rate:  1e-05
Epoch 215/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2399 - acc: 0.9075 - val_loss: 0.1948 - val_acc: 0.9275
Learning rate:  1e-05
Epoch 216/300
924/924 [==============================] - 124s 135ms/step - loss: 0.2427 - acc: 0.9054 - val_loss: 0.1955 - val_acc: 0.9283
Learning rate:  1e-05
Epoch 217/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2398 - acc: 0.9070 - val_loss: 0.1947 - val_acc: 0.9275
Learning rate:  1e-05
Epoch 218/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2455 - acc: 0.9042 - val_loss: 0.1948 - val_acc: 0.9283
Learning rate:  1e-05
Epoch 219/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2483 - acc: 0.9065 - val_loss: 0.1965 - val_acc: 0.9286
Learning rate:  1e-05
Epoch 220/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2438 - acc: 0.9066 - val_loss: 0.1976 - val_acc: 0.9275
Learning rate:  1e-05
Epoch 221/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2461 - acc: 0.9046 - val_loss: 0.1968 - val_acc: 0.9267
Learning rate:  1e-05
Epoch 222/300
924/924 [==============================] - 125s 136ms/step - loss: 0.2411 - acc: 0.9056 - val_loss: 0.1959 - val_acc: 0.9272
Learning rate:  1e-05
Epoch 223/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2404 - acc: 0.9080 - val_loss: 0.1980 - val_acc: 0.9275
Learning rate:  1e-05
Epoch 224/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2367 - acc: 0.9060 - val_loss: 0.1947 - val_acc: 0.9286
Learning rate:  1e-05
Epoch 225/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2487 - acc: 0.9042 - val_loss: 0.1976 - val_acc: 0.9280
Learning rate:  1e-05
Epoch 226/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2403 - acc: 0.9075 - val_loss: 0.1969 - val_acc: 0.9269
Learning rate:  1e-05
Epoch 227/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2400 - acc: 0.9055 - val_loss: 0.1948 - val_acc: 0.9291
Learning rate:  1e-05
Epoch 228/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2430 - acc: 0.9036 - val_loss: 0.1963 - val_acc: 0.9275
Learning rate:  1e-05
Epoch 229/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2426 - acc: 0.9065 - val_loss: 0.1952 - val_acc: 0.9267
Learning rate:  1e-05
Epoch 230/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2427 - acc: 0.9031 - val_loss: 0.1966 - val_acc: 0.9278
Learning rate:  1e-05
Epoch 231/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2424 - acc: 0.9040 - val_loss: 0.1946 - val_acc: 0.9299
Learning rate:  1e-05
Epoch 232/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2360 - acc: 0.9082 - val_loss: 0.1952 - val_acc: 0.9283
Learning rate:  1e-05
Epoch 233/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2430 - acc: 0.9063 - val_loss: 0.1953 - val_acc: 0.9261
Learning rate:  1e-05
Epoch 234/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2407 - acc: 0.9048 - val_loss: 0.1958 - val_acc: 0.9272
Learning rate:  1e-05
Epoch 235/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2412 - acc: 0.9032 - val_loss: 0.1959 - val_acc: 0.9288
Learning rate:  1e-05
Epoch 236/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2486 - acc: 0.9038 - val_loss: 0.1974 - val_acc: 0.9283
Learning rate:  1e-05
Epoch 237/300
924/924 [==============================] - 125s 135ms/step - loss: 0.2385 - acc: 0.9064 - val_loss: 0.1967 - val_acc: 0.9291
Learning rate:  1e-05
Epoch 238/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2413 - acc: 0.9079 - val_loss: 0.1950 - val_acc: 0.9297
Learning rate:  1e-05
Epoch 239/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2411 - acc: 0.9064 - val_loss: 0.1952 - val_acc: 0.9286
Learning rate:  1e-05
Epoch 240/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2420 - acc: 0.9036 - val_loss: 0.1953 - val_acc: 0.9286
Learning rate:  1e-05
Epoch 241/300
924/924 [==============================] - 125s 135ms/step - loss: 0.2493 - acc: 0.9027 - val_loss: 0.1964 - val_acc: 0.9278
Learning rate:  1e-06
Epoch 242/300
924/924 [==============================] - 121s 130ms/step - loss: 0.2393 - acc: 0.9065 - val_loss: 0.1973 - val_acc: 0.9291
Learning rate:  1e-06
Epoch 243/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2448 - acc: 0.9027 - val_loss: 0.1968 - val_acc: 0.9280
Learning rate:  1e-06
Epoch 244/300
924/924 [==============================] - 121s 130ms/step - loss: 0.2406 - acc: 0.9071 - val_loss: 0.1958 - val_acc: 0.9275
Learning rate:  1e-06
Epoch 245/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2431 - acc: 0.9058 - val_loss: 0.1963 - val_acc: 0.9272
Learning rate:  1e-06
Epoch 246/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2409 - acc: 0.9054 - val_loss: 0.1953 - val_acc: 0.9291
Learning rate:  1e-06
Epoch 247/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2359 - acc: 0.9085 - val_loss: 0.1967 - val_acc: 0.9280
Learning rate:  1e-06
Epoch 248/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2478 - acc: 0.9041 - val_loss: 0.1950 - val_acc: 0.9275
Learning rate:  1e-06
Epoch 249/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2490 - acc: 0.9043 - val_loss: 0.1972 - val_acc: 0.9291
Learning rate:  1e-06
Epoch 250/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2374 - acc: 0.9064 - val_loss: 0.1968 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 251/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2397 - acc: 0.9040 - val_loss: 0.1963 - val_acc: 0.9280
Learning rate:  1e-06
Epoch 252/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2418 - acc: 0.9047 - val_loss: 0.1950 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 253/300
924/924 [==============================] - 125s 136ms/step - loss: 0.2435 - acc: 0.9071 - val_loss: 0.1948 - val_acc: 0.9291
Learning rate:  1e-06
Epoch 254/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2411 - acc: 0.9049 - val_loss: 0.1959 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 255/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2381 - acc: 0.9050 - val_loss: 0.1949 - val_acc: 0.9280
Learning rate:  1e-06
Epoch 256/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2453 - acc: 0.9031 - val_loss: 0.1953 - val_acc: 0.9302
Learning rate:  1e-06
Epoch 257/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2443 - acc: 0.9028 - val_loss: 0.1964 - val_acc: 0.9278
Learning rate:  1e-06
Epoch 258/300
924/924 [==============================] - 123s 133ms/step - loss: 0.2410 - acc: 0.9060 - val_loss: 0.1959 - val_acc: 0.9278
Learning rate:  1e-06
Epoch 259/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2434 - acc: 0.9035 - val_loss: 0.1952 - val_acc: 0.9294
Learning rate:  1e-06
Epoch 260/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2394 - acc: 0.9045 - val_loss: 0.1965 - val_acc: 0.9288
Learning rate:  1e-06
Epoch 261/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2434 - acc: 0.9036 - val_loss: 0.1948 - val_acc: 0.9294
Learning rate:  1e-06
Epoch 262/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2447 - acc: 0.9032 - val_loss: 0.1951 - val_acc: 0.9288
Learning rate:  1e-06
Epoch 263/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2416 - acc: 0.9032 - val_loss: 0.1944 - val_acc: 0.9288
Learning rate:  1e-06
Epoch 264/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2399 - acc: 0.9073 - val_loss: 0.1954 - val_acc: 0.9278
Learning rate:  1e-06
Epoch 265/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2502 - acc: 0.9046 - val_loss: 0.1956 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 266/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2395 - acc: 0.9066 - val_loss: 0.1946 - val_acc: 0.9286
Learning rate:  1e-06
Epoch 267/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2439 - acc: 0.9048 - val_loss: 0.1954 - val_acc: 0.9294
Learning rate:  1e-06
Epoch 268/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2384 - acc: 0.9084 - val_loss: 0.1973 - val_acc: 0.9275
Learning rate:  1e-06
Epoch 269/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2456 - acc: 0.9062 - val_loss: 0.1965 - val_acc: 0.9283
Learning rate:  1e-06
Epoch 270/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2431 - acc: 0.9048 - val_loss: 0.1959 - val_acc: 0.9283
Learning rate:  1e-06
Epoch 271/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2401 - acc: 0.9039 - val_loss: 0.1962 - val_acc: 0.9291
Learning rate:  5e-07
Epoch 272/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2421 - acc: 0.9075 - val_loss: 0.1961 - val_acc: 0.9278
Learning rate:  5e-07
Epoch 273/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2364 - acc: 0.9080 - val_loss: 0.1951 - val_acc: 0.9288
Learning rate:  5e-07
Epoch 274/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2439 - acc: 0.9035 - val_loss: 0.1961 - val_acc: 0.9283
Learning rate:  5e-07
Epoch 275/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2428 - acc: 0.9065 - val_loss: 0.1958 - val_acc: 0.9291
Learning rate:  5e-07
Epoch 276/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2454 - acc: 0.9052 - val_loss: 0.1961 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 277/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2414 - acc: 0.9057 - val_loss: 0.1955 - val_acc: 0.9294
Learning rate:  5e-07
Epoch 278/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2338 - acc: 0.9090 - val_loss: 0.1945 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 279/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2393 - acc: 0.9068 - val_loss: 0.1944 - val_acc: 0.9283
Learning rate:  5e-07
Epoch 280/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2405 - acc: 0.9075 - val_loss: 0.1955 - val_acc: 0.9278
Learning rate:  5e-07
Epoch 281/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2480 - acc: 0.9033 - val_loss: 0.1958 - val_acc: 0.9275
Learning rate:  5e-07
Epoch 282/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2364 - acc: 0.9071 - val_loss: 0.1962 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 283/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2389 - acc: 0.9050 - val_loss: 0.1944 - val_acc: 0.9291
Learning rate:  5e-07
Epoch 284/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2374 - acc: 0.9058 - val_loss: 0.1941 - val_acc: 0.9288
Learning rate:  5e-07
Epoch 285/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2410 - acc: 0.9052 - val_loss: 0.1962 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 286/300
924/924 [==============================] - 121s 131ms/step - loss: 0.2420 - acc: 0.9060 - val_loss: 0.1943 - val_acc: 0.9278
Learning rate:  5e-07
Epoch 287/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2435 - acc: 0.9075 - val_loss: 0.1952 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 288/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2390 - acc: 0.9079 - val_loss: 0.1953 - val_acc: 0.9280
Learning rate:  5e-07
Epoch 289/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2443 - acc: 0.9056 - val_loss: 0.1949 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 290/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2421 - acc: 0.9041 - val_loss: 0.1935 - val_acc: 0.9288
Learning rate:  5e-07
Epoch 291/300
924/924 [==============================] - 121s 130ms/step - loss: 0.2394 - acc: 0.9063 - val_loss: 0.1950 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 292/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2449 - acc: 0.9058 - val_loss: 0.1968 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 293/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2426 - acc: 0.9042 - val_loss: 0.1953 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 294/300
924/924 [==============================] - 122s 132ms/step - loss: 0.2410 - acc: 0.9036 - val_loss: 0.1960 - val_acc: 0.9275
Learning rate:  5e-07
Epoch 295/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2475 - acc: 0.9016 - val_loss: 0.1960 - val_acc: 0.9280
Learning rate:  5e-07
Epoch 296/300
924/924 [==============================] - 119s 129ms/step - loss: 0.2457 - acc: 0.9044 - val_loss: 0.1941 - val_acc: 0.9302
Learning rate:  5e-07
Epoch 297/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2407 - acc: 0.9075 - val_loss: 0.1963 - val_acc: 0.9307
Learning rate:  5e-07
Epoch 298/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2390 - acc: 0.9067 - val_loss: 0.1946 - val_acc: 0.9291
Learning rate:  5e-07
Epoch 299/300
924/924 [==============================] - 121s 130ms/step - loss: 0.2426 - acc: 0.9073 - val_loss: 0.1963 - val_acc: 0.9275
Learning rate:  5e-07
Epoch 300/300
924/924 [==============================] - 120s 130ms/step - loss: 0.2427 - acc: 0.9089 - val_loss: 0.1955 - val_acc: 0.9283
Out[21]:
<tensorflow.python.keras.callbacks.History at 0x216e54446d8>
In [22]:
##### Now the training is complete, we get
# another object to load the weights
# compile it, so that we can do 
# final evaluation on it
modelGo.load_weights(filepath)
modelGo.compile(loss='categorical_crossentropy', 
                optimizer='adam', 
                metrics=['accuracy'])
In [23]:
# Make classification on the test dataset
predicts    = modelGo.predict(tsDat)

# Prepare the classification output
# for the classification report
predout     = np.argmax(predicts,axis=1)
testout     = np.argmax(tsLbl,axis=1)
labelname   = ['non-flower', 'flower']
                                            # the labels for the classfication report


testScores  = metrics.accuracy_score(testout,predout)
confusion   = metrics.confusion_matrix(testout,predout)


print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout,predout,target_names=labelname,digits=4))
print(confusion)
Best accuracy (on testing dataset): 93.07%
              precision    recall  f1-score   support

  non-flower     0.8946    0.9329    0.9133      1446
      flower     0.9557    0.9293    0.9423      2250

    accuracy                         0.9307      3696
   macro avg     0.9251    0.9311    0.9278      3696
weighted avg     0.9318    0.9307    0.9310      3696

[[1349   97]
 [ 159 2091]]
In [24]:
import pandas as pd

records     = pd.read_csv(modelname +'.csv')
plt.figure()
plt.subplot(211)
plt.plot(records['val_loss'])
plt.plot(records['loss'])
plt.yticks([0, 0.20, 0.30, 0.4, 0.5])
plt.title('Loss value',fontsize=12)

ax          = plt.gca()
ax.set_xticklabels([])



plt.subplot(212)
plt.plot(records['val_acc'])
plt.plot(records['acc'])
plt.yticks([0.7, 0.8, 0.9, 1.0])
plt.title('Accuracy',fontsize=12)
plt.show()
In [25]:
wrong_ans_index = []

for i in range(len(predout)):
    if predout[i] != testout[i]:
        wrong_ans_index.append(i)
In [26]:
wrong_ans_index = list(set(wrong_ans_index))
In [27]:
# Randomly show X examples of that was wrong

dataset = tsDatOrg #flowers #fungus #rocks

for index in wrong_ans_index:
    #index = wrong_ans_index[random.randint(0, len(wrong_ans_index)-1)]
    print("Showing %s index image" %(index))
    print("Predicted as %s but is actually %s" %(predout[index], testout[index]))
    imgplot = plt.imshow(data[index])
    plt.show()
Showing 3155 index image
Predicted as 1 but is actually 0
Showing 2051 index image
Predicted as 0 but is actually 1
Showing 4 index image
Predicted as 0 but is actually 1
Showing 2053 index image
Predicted as 0 but is actually 1
Showing 3080 index image
Predicted as 1 but is actually 0
Showing 17 index image
Predicted as 0 but is actually 1
Showing 3603 index image
Predicted as 1 but is actually 0
Showing 1557 index image
Predicted as 0 but is actually 1
Showing 2583 index image
Predicted as 1 but is actually 0
Showing 536 index image
Predicted as 0 but is actually 1
Showing 1049 index image
Predicted as 0 but is actually 1
Showing 2584 index image
Predicted as 1 but is actually 0
Showing 32 index image
Predicted as 0 but is actually 1
Showing 33 index image
Predicted as 0 but is actually 1
Showing 1572 index image
Predicted as 0 but is actually 1
Showing 37 index image
Predicted as 0 but is actually 1
Showing 1065 index image
Predicted as 0 but is actually 1
Showing 3625 index image
Predicted as 1 but is actually 0
Showing 2094 index image
Predicted as 0 but is actually 1
Showing 2606 index image
Predicted as 1 but is actually 0
Showing 2610 index image
Predicted as 1 but is actually 0
Showing 564 index image
Predicted as 0 but is actually 1
Showing 1079 index image
Predicted as 0 but is actually 1
Showing 3645 index image
Predicted as 1 but is actually 0
Showing 2111 index image
Predicted as 0 but is actually 1
Showing 3136 index image
Predicted as 1 but is actually 0
Showing 1603 index image
Predicted as 0 but is actually 1
Showing 3140 index image
Predicted as 1 but is actually 0
Showing 3141 index image
Predicted as 1 but is actually 0
Showing 70 index image
Predicted as 0 but is actually 1
Showing 2632 index image
Predicted as 1 but is actually 0
Showing 3656 index image
Predicted as 1 but is actually 0
Showing 75 index image
Predicted as 0 but is actually 1
Showing 1616 index image
Predicted as 0 but is actually 1
Showing 1618 index image
Predicted as 0 but is actually 1
Showing 83 index image
Predicted as 0 but is actually 1
Showing 1619 index image
Predicted as 0 but is actually 1
Showing 597 index image
Predicted as 0 but is actually 1
Showing 1109 index image
Predicted as 0 but is actually 1
Showing 2132 index image
Predicted as 0 but is actually 1
Showing 2134 index image
Predicted as 0 but is actually 1
Showing 89 index image
Predicted as 0 but is actually 1
Showing 601 index image
Predicted as 0 but is actually 1
Showing 2135 index image
Predicted as 0 but is actually 1
Showing 2651 index image
Predicted as 1 but is actually 0
Showing 3668 index image
Predicted as 1 but is actually 0
Showing 2656 index image
Predicted as 1 but is actually 0
Showing 1121 index image
Predicted as 0 but is actually 1
Showing 3680 index image
Predicted as 1 but is actually 0
Showing 1642 index image
Predicted as 0 but is actually 1
Showing 2668 index image
Predicted as 1 but is actually 0
Showing 621 index image
Predicted as 0 but is actually 1
Showing 3183 index image
Predicted as 1 but is actually 0
Showing 2164 index image
Predicted as 0 but is actually 1
Showing 631 index image
Predicted as 0 but is actually 1
Showing 1145 index image
Predicted as 0 but is actually 1
Showing 2683 index image
Predicted as 1 but is actually 0
Showing 636 index image
Predicted as 0 but is actually 1
Showing 2172 index image
Predicted as 0 but is actually 1
Showing 3196 index image
Predicted as 1 but is actually 0
Showing 3197 index image
Predicted as 1 but is actually 0
Showing 642 index image
Predicted as 0 but is actually 1
Showing 131 index image
Predicted as 0 but is actually 1
Showing 2691 index image
Predicted as 1 but is actually 0
Showing 2181 index image
Predicted as 0 but is actually 1
Showing 3203 index image
Predicted as 1 but is actually 0
Showing 647 index image
Predicted as 0 but is actually 1
Showing 138 index image
Predicted as 0 but is actually 1
Showing 1165 index image
Predicted as 0 but is actually 1
Showing 2189 index image
Predicted as 0 but is actually 1
Showing 2190 index image
Predicted as 0 but is actually 1
Showing 656 index image
Predicted as 0 but is actually 1
Showing 146 index image
Predicted as 0 but is actually 1
Showing 147 index image
Predicted as 0 but is actually 1
Showing 2195 index image
Predicted as 0 but is actually 1
Showing 1175 index image
Predicted as 0 but is actually 1
Showing 2199 index image
Predicted as 0 but is actually 1
Showing 2713 index image
Predicted as 1 but is actually 0
Showing 2714 index image
Predicted as 1 but is actually 0
Showing 3224 index image
Predicted as 1 but is actually 0
Showing 156 index image
Predicted as 0 but is actually 1
Showing 3226 index image
Predicted as 1 but is actually 0
Showing 2725 index image
Predicted as 1 but is actually 0
Showing 1704 index image
Predicted as 0 but is actually 1
Showing 2216 index image
Predicted as 0 but is actually 1
Showing 1706 index image
Predicted as 0 but is actually 1
Showing 2221 index image
Predicted as 0 but is actually 1
Showing 2743 index image
Predicted as 1 but is actually 0
Showing 2749 index image
Predicted as 1 but is actually 0
Showing 1731 index image
Predicted as 0 but is actually 1
Showing 1221 index image
Predicted as 0 but is actually 1
Showing 2246 index image
Predicted as 0 but is actually 1
Showing 3271 index image
Predicted as 1 but is actually 0
Showing 200 index image
Predicted as 0 but is actually 1
Showing 2248 index image
Predicted as 0 but is actually 1
Showing 2249 index image
Predicted as 0 but is actually 1
Showing 2763 index image
Predicted as 1 but is actually 0
Showing 2770 index image
Predicted as 1 but is actually 0
Showing 1235 index image
Predicted as 0 but is actually 1
Showing 2772 index image
Predicted as 1 but is actually 0
Showing 1756 index image
Predicted as 0 but is actually 1
Showing 2781 index image
Predicted as 1 but is actually 0
Showing 1247 index image
Predicted as 0 but is actually 1
Showing 224 index image
Predicted as 0 but is actually 1
Showing 736 index image
Predicted as 0 but is actually 1
Showing 3297 index image
Predicted as 1 but is actually 0
Showing 2789 index image
Predicted as 1 but is actually 0
Showing 2793 index image
Predicted as 1 but is actually 0
Showing 1259 index image
Predicted as 0 but is actually 1
Showing 235 index image
Predicted as 0 but is actually 1
Showing 748 index image
Predicted as 0 but is actually 1
Showing 2283 index image
Predicted as 1 but is actually 0
Showing 2285 index image
Predicted as 1 but is actually 0
Showing 240 index image
Predicted as 0 but is actually 1
Showing 753 index image
Predicted as 0 but is actually 1
Showing 1264 index image
Predicted as 0 but is actually 1
Showing 2803 index image
Predicted as 1 but is actually 0
Showing 3310 index image
Predicted as 1 but is actually 0
Showing 3316 index image
Predicted as 1 but is actually 0
Showing 246 index image
Predicted as 0 but is actually 1
Showing 759 index image
Predicted as 0 but is actually 1
Showing 2297 index image
Predicted as 1 but is actually 0
Showing 253 index image
Predicted as 0 but is actually 1
Showing 2302 index image
Predicted as 1 but is actually 0
Showing 2819 index image
Predicted as 1 but is actually 0
Showing 2821 index image
Predicted as 1 but is actually 0
Showing 3513 index image
Predicted as 1 but is actually 0
Showing 1804 index image
Predicted as 0 but is actually 1
Showing 270 index image
Predicted as 0 but is actually 1
Showing 1294 index image
Predicted as 0 but is actually 1
Showing 1807 index image
Predicted as 0 but is actually 1
Showing 1808 index image
Predicted as 0 but is actually 1
Showing 1298 index image
Predicted as 0 but is actually 1
Showing 281 index image
Predicted as 0 but is actually 1
Showing 795 index image
Predicted as 0 but is actually 1
Showing 3356 index image
Predicted as 1 but is actually 0
Showing 1823 index image
Predicted as 0 but is actually 1
Showing 1824 index image
Predicted as 0 but is actually 1
Showing 2337 index image
Predicted as 1 but is actually 0
Showing 2852 index image
Predicted as 1 but is actually 0
Showing 2343 index image
Predicted as 1 but is actually 0
Showing 2858 index image
Predicted as 1 but is actually 0
Showing 3371 index image
Predicted as 1 but is actually 0
Showing 1836 index image
Predicted as 0 but is actually 1
Showing 1325 index image
Predicted as 0 but is actually 1
Showing 2349 index image
Predicted as 1 but is actually 0
Showing 1329 index image
Predicted as 0 but is actually 1
Showing 820 index image
Predicted as 0 but is actually 1
Showing 1845 index image
Predicted as 0 but is actually 1
Showing 3521 index image
Predicted as 1 but is actually 0
Showing 312 index image
Predicted as 0 but is actually 1
Showing 2873 index image
Predicted as 1 but is actually 0
Showing 827 index image
Predicted as 0 but is actually 1
Showing 1340 index image
Predicted as 0 but is actually 1
Showing 2875 index image
Predicted as 1 but is actually 0
Showing 1854 index image
Predicted as 0 but is actually 1
Showing 2876 index image
Predicted as 1 but is actually 0
Showing 836 index image
Predicted as 0 but is actually 1
Showing 1861 index image
Predicted as 0 but is actually 1
Showing 1351 index image
Predicted as 0 but is actually 1
Showing 2375 index image
Predicted as 1 but is actually 0
Showing 329 index image
Predicted as 0 but is actually 1
Showing 2376 index image
Predicted as 1 but is actually 0
Showing 1355 index image
Predicted as 0 but is actually 1
Showing 1868 index image
Predicted as 0 but is actually 1
Showing 3400 index image
Predicted as 1 but is actually 0
Showing 2384 index image
Predicted as 1 but is actually 0
Showing 3410 index image
Predicted as 1 but is actually 0
Showing 854 index image
Predicted as 0 but is actually 1
Showing 2390 index image
Predicted as 1 but is actually 0
Showing 2903 index image
Predicted as 1 but is actually 0
Showing 3415 index image
Predicted as 1 but is actually 0
Showing 1371 index image
Predicted as 0 but is actually 1
Showing 3419 index image
Predicted as 1 but is actually 0
Showing 2909 index image
Predicted as 1 but is actually 0
Showing 2398 index image
Predicted as 1 but is actually 0
Showing 863 index image
Predicted as 0 but is actually 1
Showing 2399 index image
Predicted as 1 but is actually 0
Showing 2400 index image
Predicted as 1 but is actually 0
Showing 2914 index image
Predicted as 1 but is actually 0
Showing 1382 index image
Predicted as 0 but is actually 1
Showing 2407 index image
Predicted as 1 but is actually 0
Showing 2408 index image
Predicted as 1 but is actually 0
Showing 876 index image
Predicted as 0 but is actually 1
Showing 1902 index image
Predicted as 0 but is actually 1
Showing 367 index image
Predicted as 0 but is actually 1
Showing 1394 index image
Predicted as 0 but is actually 1
Showing 1397 index image
Predicted as 0 but is actually 1
Showing 376 index image
Predicted as 0 but is actually 1
Showing 2426 index image
Predicted as 1 but is actually 0
Showing 2432 index image
Predicted as 1 but is actually 0
Showing 2947 index image
Predicted as 1 but is actually 0
Showing 2436 index image
Predicted as 1 but is actually 0
Showing 2441 index image
Predicted as 1 but is actually 0
Showing 394 index image
Predicted as 0 but is actually 1
Showing 1931 index image
Predicted as 0 but is actually 1
Showing 2442 index image
Predicted as 1 but is actually 0
Showing 3467 index image
Predicted as 1 but is actually 0
Showing 910 index image
Predicted as 0 but is actually 1
Showing 1424 index image
Predicted as 0 but is actually 1
Showing 1426 index image
Predicted as 0 but is actually 1
Showing 1946 index image
Predicted as 0 but is actually 1
Showing 927 index image
Predicted as 0 but is actually 1
Showing 416 index image
Predicted as 0 but is actually 1
Showing 1955 index image
Predicted as 0 but is actually 1
Showing 3493 index image
Predicted as 1 but is actually 0
Showing 3494 index image
Predicted as 1 but is actually 0
Showing 935 index image
Predicted as 0 but is actually 1
Showing 1963 index image
Predicted as 0 but is actually 1
Showing 1454 index image
Predicted as 0 but is actually 1
Showing 1967 index image
Predicted as 0 but is actually 1
Showing 2481 index image
Predicted as 1 but is actually 0
Showing 947 index image
Predicted as 0 but is actually 1
Showing 2483 index image
Predicted as 1 but is actually 0
Showing 950 index image
Predicted as 0 but is actually 1
Showing 1463 index image
Predicted as 0 but is actually 1
Showing 440 index image
Predicted as 0 but is actually 1
Showing 2486 index image
Predicted as 1 but is actually 0
Showing 954 index image
Predicted as 0 but is actually 1
Showing 1466 index image
Predicted as 0 but is actually 1
Showing 1978 index image
Predicted as 0 but is actually 1
Showing 1469 index image
Predicted as 0 but is actually 1
Showing 958 index image
Predicted as 0 but is actually 1
Showing 1470 index image
Predicted as 0 but is actually 1
Showing 448 index image
Predicted as 0 but is actually 1
Showing 1472 index image
Predicted as 0 but is actually 1
Showing 962 index image
Predicted as 0 but is actually 1
Showing 1981 index image
Predicted as 0 but is actually 1
Showing 1986 index image
Predicted as 0 but is actually 1
Showing 1988 index image
Predicted as 0 but is actually 1
Showing 2498 index image
Predicted as 1 but is actually 0
Showing 455 index image
Predicted as 0 but is actually 1
Showing 456 index image
Predicted as 0 but is actually 1
Showing 457 index image
Predicted as 0 but is actually 1
Showing 1480 index image
Predicted as 0 but is actually 1
Showing 2505 index image
Predicted as 1 but is actually 0
Showing 3017 index image
Predicted as 1 but is actually 0
Showing 1486 index image
Predicted as 0 but is actually 1
Showing 463 index image
Predicted as 0 but is actually 1
Showing 1489 index image
Predicted as 0 but is actually 1
Showing 1493 index image
Predicted as 0 but is actually 1
Showing 3033 index image
Predicted as 1 but is actually 0
Showing 986 index image
Predicted as 0 but is actually 1
Showing 3044 index image
Predicted as 1 but is actually 0
Showing 486 index image
Predicted as 0 but is actually 1
Showing 2025 index image
Predicted as 0 but is actually 1
Showing 2029 index image
Predicted as 0 but is actually 1
Showing 495 index image
Predicted as 0 but is actually 1
Showing 3567 index image
Predicted as 1 but is actually 0
Showing 1010 index image
Predicted as 0 but is actually 1
Showing 1523 index image
Predicted as 0 but is actually 1
Showing 3058 index image
Predicted as 1 but is actually 0
Showing 1525 index image
Predicted as 0 but is actually 1
Showing 2549 index image
Predicted as 1 but is actually 0
Showing 1020 index image
Predicted as 0 but is actually 1
Showing 510 index image
Predicted as 0 but is actually 1
In [ ]:
# Stacking 3 NNs?